diff --git a/datasets/dev/build-flwr-datasets-docs.sh b/datasets/dev/build-flwr-datasets-docs.sh new file mode 100755 index 000000000000..dc3cd979d5c8 --- /dev/null +++ b/datasets/dev/build-flwr-datasets-docs.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Generate the docs, then rename and move the files such that they meet the convention used in Flower. +# Note that it involves two runs of sphinx-build that are necessary. +# The first run generates the .rst files (and the html files that are discarded) +# The second time it is run after the files are renamed and moved to the correct place. It generates the final htmls. + +set -e + +cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/../doc + +# Remove the old docs from source/ref-api +REF_API_DIR="source/ref-api" +if [[ -d "$REF_API_DIR" ]]; then + +  echo "Removing ${REF_API_DIR}" +  rm -r ${REF_API_DIR} +fi + +# Remove the old html files +if [[ -d build ]]; then +  echo "Removing ./build" +  rm -r build +fi + +# Docs generation: Generate new rst files +# It starts at the __init__ in the main directory and recursively generates the documentation for the +# classes/modules/packages specified in __all__. +# Note if a package cannot be reached via the recursive traversal, even if it has __all__, it won't be documented. +echo "Generating the docs based on only the functionality given in the __all__." +sphinx-build -M html source build diff --git a/datasets/doc/source/_templates/autosummary/class.rst b/datasets/doc/source/_templates/autosummary/class.rst new file mode 100644 index 000000000000..b4b35789bc6f --- /dev/null +++ b/datasets/doc/source/_templates/autosummary/class.rst @@ -0,0 +1,33 @@ +{{ name | escape | underline}} + +.. currentmodule:: {{ module }} + +.. autoclass:: {{ objname }} + :members: + :show-inheritance: + :inherited-members: + + {% block methods %} + + {% if methods %} + .. rubric:: {{ _('Methods') }} + + .. 
autosummary:: + {% for item in methods %} + {% if item != "__init__" %} + ~{{ name }}.{{ item }} + {% endif %} + {%- endfor %} + {% endif %} + {% endblock %} + + {% block attributes %} + {% if attributes %} + .. rubric:: {{ _('Attributes') }} + + .. autosummary:: + {% for item in attributes %} + ~{{ name }}.{{ item }} + {%- endfor %} + {% endif %} + {% endblock %} diff --git a/datasets/doc/source/_templates/autosummary/module.rst b/datasets/doc/source/_templates/autosummary/module.rst new file mode 100644 index 000000000000..571db198d27c --- /dev/null +++ b/datasets/doc/source/_templates/autosummary/module.rst @@ -0,0 +1,66 @@ +{{ name | escape | underline}} + +.. automodule:: {{ fullname }} + + {% block attributes %} + {% if attributes %} + .. rubric:: Module Attributes + + .. autosummary:: + :toctree: + {% for item in attributes %} + {{ item }} + {%- endfor %} + {% endif %} + {% endblock %} + + {% block functions %} + {% if functions %} + .. rubric:: {{ _('Functions') }} + + .. autosummary:: + :toctree: + {% for item in functions %} + {{ item }} + {%- endfor %} + {% endif %} + {% endblock %} + + {% block classes %} + {% if classes %} + .. rubric:: {{ _('Classes') }} + + .. autosummary:: + :toctree: + :template: autosummary/class.rst + {% for item in classes %} + {{ item }} + {%- endfor %} + {% endif %} + {% endblock %} + + {% block exceptions %} + {% if exceptions %} + .. rubric:: {{ _('Exceptions') }} + + .. autosummary:: + :toctree: + {% for item in exceptions %} + {{ item }} + {%- endfor %} + {% endif %} + {% endblock %} + +{% block modules %} +{% if modules %} +.. rubric:: Modules + +.. 
autosummary:: + :toctree: + :template: autosummary/module.rst + :recursive: +{% for item in modules %} + {{ item }} +{%- endfor %} +{% endif %} +{% endblock %} diff --git a/datasets/doc/source/conf.py b/datasets/doc/source/conf.py index 4fccaf0ef084..32baa6dd1471 100644 --- a/datasets/doc/source/conf.py +++ b/datasets/doc/source/conf.py @@ -61,8 +61,42 @@ "nbsphinx", ] +# Generate .rst files autosummary_generate = True +# Document ONLY the objects from __all__ (present in __init__ files). +# It will be done recursively starting from flwr_dataset.__init__ +# It's controlled in the index.rst file. +autosummary_ignore_module_all = False + +# Each class and function docs start with the path to it +# Make the flwr_datasets.federated_dataset.FederatedDataset appear as FederatedDataset +# The full name is still at the top of the page +add_module_names = False + +def find_test_modules(package_path): + """Go through the python files and exclude every *_test.py file.""" + full_path_modules = [] + for root, dirs, files in os.walk(package_path): + for file in files: + if file.endswith('_test.py'): + # Construct the module path relative to the package directory + full_path = os.path.join(root, file) + relative_path = os.path.relpath(full_path, package_path) + # Convert file path to dotted module path + module_path = os.path.splitext(relative_path)[0].replace(os.sep, '.') + full_path_modules.append(module_path) + modules = [] + for full_path_module in full_path_modules: + parts = full_path_module.split('.') + for i in range(len(parts)): + modules.append('.'.join(parts[i:])) + return modules + +# Stop from documenting the *_test.py files. +# That's the only way to do that in autosummary (make the modules as mock_imports). +autodoc_mock_imports = find_test_modules(os.path.abspath("../../")) + # Add any paths that contain templates here, relative to this directory. 
templates_path = ["_templates"] diff --git a/datasets/doc/source/how-to-use-with-numpy.rst b/datasets/doc/source/how-to-use-with-numpy.rst index c3fbf85969e3..db80b712a13e 100644 --- a/datasets/doc/source/how-to-use-with-numpy.rst +++ b/datasets/doc/source/how-to-use-with-numpy.rst @@ -3,7 +3,7 @@ Use with NumPy Let's integrate ``flwr-datasets`` with NumPy. -Prepare the desired partitioning:: +Create a ``FederatedDataset``:: from flwr_datasets import FederatedDataset @@ -11,6 +11,22 @@ Prepare the desired partitioning:: partition = fds.load_partition(0, "train") centralized_dataset = fds.load_full("test") +Inspect the names of the features:: + + partition.features + +In case of CIFAR10, you should see the following output. + +.. code-block:: none + + {'img': Image(decode=True, id=None), + 'label': ClassLabel(names=['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', + 'frog', 'horse', 'ship', 'truck'], id=None)} + +We will use the keys in the partition features in order to apply transformations to the data or pass it to a ML model. Let's move to the transformations. + +NumPy +----- Transform to NumPy:: partition_np = partition.with_format("numpy") diff --git a/datasets/doc/source/how-to-use-with-pytorch.rst b/datasets/doc/source/how-to-use-with-pytorch.rst index 497266dd1e69..85e7833b0869 100644 --- a/datasets/doc/source/how-to-use-with-pytorch.rst +++ b/datasets/doc/source/how-to-use-with-pytorch.rst @@ -10,7 +10,7 @@ Standard setup - download the dataset, choose the partitioning:: partition = fds.load_partition(0, "train") centralized_dataset = fds.load_full("test") -Determine the names of our features (you can alternatively do that directly on the Hugging Face website). The name can +Determine the names of the features (you can alternatively do that directly on the Hugging Face website). The name can vary e.g. 
"img" or "image", "label" or "labels":: partition.features @@ -38,7 +38,7 @@ That is why we iterate over all the samples from this batch and apply our transf return batch partition_torch = partition.with_transform(apply_transforms) - # At this point, you can check if you didn't make any mistakes by calling partition_torch[0] + # Now, you can check if you didn't make any mistakes by calling partition_torch[0] dataloader = DataLoader(partition_torch, batch_size=64) @@ -70,8 +70,10 @@ If you want to divide the dataset, you can use (at any point before passing the Or you can simply calculate the indices yourself:: partition_len = len(partition) - partition_train = partition[:int(0.8 * partition_len)] - partition_test = partition[int(0.8 * partition_len):] + # Split `partition` 80:20 + num_train_examples = int(0.8 * partition_len) + partition_train = partition.select(range(num_train_examples)) # use first 80% + partition_test = partition.select(range(num_train_examples, partition_len)) # use last 20% And during the training loop, you need to apply one change. With a typical dataloader, you get a list returned for each iteration:: diff --git a/datasets/doc/source/how-to-use-with-tensorflow.rst b/datasets/doc/source/how-to-use-with-tensorflow.rst index 86a1f4e0da8a..a63145d9dffa 100644 --- a/datasets/doc/source/how-to-use-with-tensorflow.rst +++ b/datasets/doc/source/how-to-use-with-tensorflow.rst @@ -1,10 +1,32 @@ Use with TensorFlow =================== -Let's integrate ``flwr-datasets`` with TensorFlow. We show you three ways how to convert the data into the formats +Let's integrate ``flwr-datasets`` with ``TensorFlow``. We show you three ways how to convert the data into the formats that ``TensorFlow``'s models expect. Please note that, especially for the smaller datasets, the performance of the following methods is very close. We recommend you choose the method you are the most comfortable with. 
+Create a ``FederatedDataset``:: + + from flwr_datasets import FederatedDataset + + fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) + partition = fds.load_partition(0, "train") + centralized_dataset = fds.load_full("test") + +Inspect the names of the features:: + + partition.features + +In case of CIFAR10, you should see the following output. + +.. code-block:: none + + {'img': Image(decode=True, id=None), + 'label': ClassLabel(names=['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', + 'frog', 'horse', 'ship', 'truck'], id=None)} + +We will use the keys in the partition features in order to construct a `tf.data.Dataset <https://www.tensorflow.org/api_docs/python/tf/data/Dataset>`_. Let's move to the transformations. + NumPy ----- The first way is to transform the data into the NumPy arrays. It's an easier option that is commonly used. Feel free to @@ -14,17 +36,7 @@ follow the :doc:`how-to-use-with-numpy` tutorial, especially if you are a beginn TensorFlow Dataset ------------------ -Work with ``TensorFlow Dataset`` abstraction. - -Standard setup:: - - from flwr_datasets import FederatedDataset - - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - partition = fds.load_partition(0, "train") - centralized_dataset = fds.load_full("test") - -Transformation to the TensorFlow Dataset:: +Transform the data to ``TensorFlow Dataset``:: + tf_dataset = partition.to_tf_dataset(columns="img", label_cols="label", batch_size=64, shuffle=True) + @@ -33,17 +45,7 @@ Transformation to the TensorFlow Dataset:: TensorFlow Tensors ------------------ -Change the data type to TensorFlow Tensors (it's not the TensorFlow dataset). 
- -Standard setup:: - - from flwr_datasets import FederatedDataset - - fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10}) - partition = fds.load_partition(0, "train") - centralized_dataset = fds.load_full("test") - -Transformation to the TensorFlow Tensors :: +Transform the data to the TensorFlow `tf.Tensor <https://www.tensorflow.org/api_docs/python/tf/Tensor>`_ (it's not the TensorFlow dataset):: data_tf = partition.with_format("tf") # Assuming you have defined your model and compiled it diff --git a/datasets/doc/source/index.rst b/datasets/doc/source/index.rst index 7b19624b341a..ae7e7259f504 100644 --- a/datasets/doc/source/index.rst +++ b/datasets/doc/source/index.rst @@ -38,11 +38,15 @@ References Information-oriented API reference and other reference material. -.. toctree:: - :maxdepth: 2 +.. autosummary:: + :toctree: ref-api + :template: autosummary/module.rst :caption: API reference + :recursive: + + flwr_datasets + - ref-api-flwr-datasets Main features ------------- diff --git a/datasets/doc/source/ref-api-flwr-datasets.rst b/datasets/doc/source/ref-api-flwr-datasets.rst deleted file mode 100644 index 2e6a9e731add..000000000000 --- a/datasets/doc/source/ref-api-flwr-datasets.rst +++ /dev/null @@ -1,27 +0,0 @@ -flwr\_datasets (Python API reference) -====================== - -Federated Dataset ------------------ -.. autoclass:: flwr_datasets.federated_dataset.FederatedDataset - :members: - - -partitioner ------------ - -.. automodule:: flwr_datasets.partitioner - - -Partitioner ------------ - -.. autoclass:: flwr_datasets.partitioner.Partitioner - :members: - - -IID Partitioner ---------------- - -.. autoclass:: flwr_datasets.partitioner.IidPartitioner - :members: diff --git a/datasets/doc/source/tutorial-quickstart.rst b/datasets/doc/source/tutorial-quickstart.rst index 8a70ee8854be..b93a08f234f2 100644 --- a/datasets/doc/source/tutorial-quickstart.rst +++ b/datasets/doc/source/tutorial-quickstart.rst @@ -5,11 +5,11 @@ Run Flower Datasets as fast as possible by learning only the essentials. 
Install Federated Datasets -------------------------- -Run on the command line +On the command line, run .. code-block:: bash - python -m pip install flwr-datasets[vision] + python -m pip install "flwr-datasets[vision]" Install the ML framework ------------------------ @@ -28,12 +28,11 @@ PyTorch Choose the dataset ------------------ Choose the dataset by going to Hugging Face `Datasets Hub `_ and searching for your -dataset by name. Note that the name is case sensitive, so make sure to pass the correct name as the `dataset` parameter -to `FederatedDataset`. +dataset by name that you will pass to the `dataset` parameter of `FederatedDataset`. Note that the name is case sensitive. Partition the dataset --------------------- -:: +To iid partition your dataset, choose the split you want to partition and the number of partitions:: from flwr_datasets import FederatedDataset @@ -42,29 +41,51 @@ Partition the dataset centralized_dataset = fds.load_full("test") Now you're ready to go. You have ten partitions created from the train split of the MNIST dataset and the test split -for the centralized evaluation. We will convert the type of the dataset from Hugging Face's Dataset type to the one +for the centralized evaluation. We will convert the type of the dataset from Hugging Face's `Dataset` type to the one supported by your framework. +Display the features +-------------------- +Determine the names of the features of your dataset (you can alternatively do that directly on the Hugging Face +website). The names can vary along different datasets e.g. "img" or "image", "label" or "labels". You will also see +the names of label categories. Type:: + + partition.features + +In case of CIFAR10, you should see the following output. + +.. 
code-block:: none + + {'img': Image(decode=True, id=None), + 'label': ClassLabel(names=['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', + 'frog', 'horse', 'ship', 'truck'], id=None)} + +Note that the image is denoted by "img" which is crucial for the next steps (conversion to the ML +framework of your choice). + Conversion ---------- -For more detailed instructions, go to :doc:`how-to-use-with-pytorch`. +For more detailed instructions, go to :doc:`how-to-use-with-pytorch`, :doc:`how-to-use-with-numpy`, or +:doc:`how-to-use-with-tensorflow`. PyTorch DataLoader ^^^^^^^^^^^^^^^^^^ -Transform the Dataset directly into the DataLoader:: +Transform the Dataset into the DataLoader, use the PyTorch transforms (`Compose` and all the others are also +possible):: from torch.utils.data import DataLoader from torchvision.transforms import ToTensor transforms = ToTensor() - partition_torch = partition.map( - lambda img: {"img": transforms(img)}, input_columns="img" - ).with_format("torch") + def apply_transforms(batch): + batch["img"] = [transforms(img) for img in batch["img"]] + return batch + partition_torch = partition.with_transform(apply_transforms) dataloader = DataLoader(partition_torch, batch_size=64) NumPy ^^^^^ -NumPy can be used as input to the TensorFlow model and is very straightforward:: +NumPy can be used as input to the TensorFlow and scikit-learn models and it is very straightforward:: partition_np = partition.with_format("numpy") X_train, y_train = partition_np["img"], partition_np["label"] diff --git a/dev/build-docs.sh b/dev/build-docs.sh index 0c913c6fc1d8..45a4dfca0adf 100755 --- a/dev/build-docs.sh +++ b/dev/build-docs.sh @@ -13,8 +13,7 @@ cd examples/doc make docs cd $ROOT -cd datasets/doc -make docs +./datasets/dev/build-flwr-datasets-docs.sh cd $ROOT cd doc diff --git a/dev/update-examples.sh b/dev/update-examples.sh index 83dff474c855..c802e21503b7 100755 --- a/dev/update-examples.sh +++ b/dev/update-examples.sh @@ -22,9 +22,14 @@ cd examples/ 
for d in $(printf '%s\n' */ | sort -V); do example=${d%/} # For each example, copy the README into the source of the Example docs - [[ $example = doc ]] || cp $example/README.md $ROOT/examples/doc/source/$example.md 2>&1 >/dev/null + [[ $example != doc ]] && cp $example/README.md $ROOT/examples/doc/source/$example.md 2>&1 >/dev/null + # For each example, copy all images of the _static folder into the examples + # docs static folder + [[ $example != doc ]] && [ -d "$example/_static" ] && { + cp $example/_static/**.{jpg,png,jpeg} $ROOT/examples/doc/source/_static/ 2>/dev/null || true + } # For each example, insert the name of the example into the index file - [[ $example = doc ]] || (echo $INSERT_LINE; echo a; echo $example; echo .; echo wq) | ed $INDEX 2>&1 >/dev/null + [[ $example != doc ]] && (echo $INSERT_LINE; echo a; echo $example; echo .; echo wq) | ed $INDEX 2>&1 >/dev/null done echo "\`\`\`" >> $INDEX diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po new file mode 100644 index 000000000000..f077264b4aab --- /dev/null +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -0,0 +1,13608 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2022 Flower Labs GmbH +# This file is distributed under the same license as the Flower package. +# FIRST AUTHOR , 2023. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Flower main\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-11-23 18:31+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language: zh_Hans\n" +"Language-Team: zh_Hans \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.13.1\n" + +#: ../../source/contributor-explanation-architecture.rst:2 +msgid "Flower Architecture" +msgstr "" + +#: ../../source/contributor-explanation-architecture.rst:5 +msgid "Edge Client Engine" +msgstr "" + +#: ../../source/contributor-explanation-architecture.rst:7 +msgid "" +"`Flower `_ core framework architecture with Edge " +"Client Engine" +msgstr "" + +#: ../../source/contributor-explanation-architecture.rst:13 +msgid "Virtual Client Engine" +msgstr "" + +#: ../../source/contributor-explanation-architecture.rst:15 +msgid "" +"`Flower `_ core framework architecture with Virtual " +"Client Engine" +msgstr "" + +#: ../../source/contributor-explanation-architecture.rst:21 +msgid "Virtual Client Engine and Edge Client Engine in the same workload" +msgstr "" + +#: ../../source/contributor-explanation-architecture.rst:23 +msgid "" +"`Flower `_ core framework architecture with both " +"Virtual Client Engine and Edge Client Engine" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:2 +msgid "Creating New Messages" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:4 +msgid "" +"This is a simple guide for creating a new type of message between the " +"server and clients in Flower." +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:6 +msgid "" +"Let's suppose we have the following example functions in " +":code:`server.py` and :code:`numpy_client.py`..." 
+msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:8 +msgid "Server's side:" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:17 +msgid "Client's side:" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:26 +msgid "" +"Let's now see what we need to implement in order to get this simple " +"function between the server and client to work!" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:30 +msgid "Message Types for Protocol Buffers" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:32 +msgid "" +"The first thing we need to do is to define a message type for the RPC " +"system in :code:`transport.proto`. Note that we have to do it for both " +"the request and response messages. For more details on the syntax of " +"proto3, please see the `official documentation " +"`_." +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:35 +msgid "Within the :code:`ServerMessage` block:" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:52 +msgid "Within the ClientMessage block:" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:70 +msgid "" +"Make sure to also add a field of the newly created message type in " +":code:`oneof msg`." +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:72 +msgid "Once that is done, we will compile the file with:" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:78 +msgid "If it compiles succesfully, you should see the following message:" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:87 +msgid "Serialization and Deserialization Functions" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:89 +msgid "" +"Our next step is to add functions to serialize and deserialize Python " +"datatypes to or from our defined RPC message types. You should add these " +"functions in :code:`serde.py`." 
+msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:91 +msgid "The four functions:" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:112 +msgid "Sending the Message from the Server" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:114 +msgid "" +"Now write the request function in your Client Proxy class (e.g., " +":code:`grpc_client_proxy.py`) using the serde functions you just created:" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:128 +msgid "Receiving the Message by the Client" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:130 +msgid "" +"Last step! Modify the code in :code:`message_handler.py` to check the " +"field of your message and call the :code:`example_response` function. " +"Remember to use the serde functions!" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:132 +msgid "Within the handle function:" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:139 +msgid "And add a new function:" +msgstr "" + +#: ../../source/contributor-how-create-new-messages.rst:149 +msgid "Hopefully, when you run your program you will get the intended result!" +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:2 +msgid "Contribute translations" +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:4 +msgid "" +"Since `Flower 1.5 `_ we have introduced translations to " +"our doc pages, but, as you might have noticed, the translations are often" +" imperfect. If you speak languages other than English, you might be able " +"to help us in our effort to make Federated Learning accessible to as many" +" people as possible by contributing to those translations! This might " +"also be a great opportunity for those wanting to become open source " +"contributors with little prerequistes." 
+msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:13 +msgid "" +"Our translation project is publicly available over on `Weblate " +"`_, this " +"where most of the work will happen." +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:18 +msgid "Contribute to existing languages" +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:23 +msgid "" +"The first thing you will need to do in order to contribute is to create a" +" free Weblate account on this `page " +"`_. More information about" +" profile settings can be found `here " +"`_." +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:29 +msgid "" +"Once you are signed in to Weblate, you can navigate to the `Flower " +"Framework project `_. Here, you should see the different existing languages" +" that can be found on the website." +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:34 +msgid "" +"Once you have selected the language you want to contribute to, you should" +" see a similar interface to this:" +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:39 +msgid "" +"The most straight forward option here is to click on the ``Translate`` " +"button on the top right (in the ``Translation status`` section). This " +"will automatically bring you to the translation interface for " +"untranslated strings." 
+msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:43 +msgid "This is what the interface looks like:" +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:47 +msgid "" +"You input your translation in the textbox at the top and then, once you " +"are happy with it, you either press ``Save and continue`` (to save the " +"translation and go to the next untranslated string), ``Save and stay`` " +"(to save the translation and stay on the same page), ``Suggest`` (to add " +"your translation to suggestions for other users to view), or ``Skip`` (to" +" go to the next untranslated string without saving anything)." +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:54 +msgid "" +"In order to help with the translations, you can see on the bottom the " +"``Nearby strings``, the ``Comments`` (from other contributors), the " +"``Automatic suggestions`` (from machine translation engines), the " +"translations in ``Other languages``, and the ``History`` of translations " +"for this string." +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:59 +msgid "" +"On the right, under the ``String information`` section, you can also " +"click the link under ``Source string location`` in order to view the " +"source of the doc file containing the string." +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:63 +msgid "" +"For more information about translating using Weblate, you can check out " +"this `in-depth guide " +"`_." +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:67 +msgid "Add new languages" +msgstr "" + +#: ../../source/contributor-how-to-contribute-translations.rst:69 +msgid "" +"If you want to add a new language, you will first have to contact us, " +"either on `Slack `_, or by opening an " +"issue on our `GitHub repo `_." 
+msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:2 +msgid "Develop in VSCode Dev Containers" +msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:4 +msgid "" +"When working on the Flower framework we want to ensure that all " +"contributors use the same developer environment to format code or run " +"tests. For this purpose we are using the VSCode Remote Containers " +"extension. What is it? Read the following quote:" +msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:7 +msgid "" +"The Visual Studio Code Remote - Containers extension lets you use a " +"Docker container as a fully-featured development environment. It allows " +"you to open any folder inside (or mounted into) a container and take " +"advantage of Visual Studio Code's full feature set. A " +":code:`devcontainer.json` file in your project tells VS Code how to " +"access (or create) a development container with a well-defined tool and " +"runtime stack. This container can be used to run an application or to " +"separate tools, libraries, or runtimes needed for working with a " +"codebase." +msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:9 +msgid "" +"Workspace files are mounted from the local file system or copied or " +"cloned into the container. Extensions are installed and run inside the " +"container, where they have full access to the tools, platform, and file " +"system. This means that you can seamlessly switch your entire development" +" environment just by connecting to a different container." 
+msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:11 +msgid "" +"Source: `Official VSCode documentation " +"`_" +msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:15 +msgid "Getting started" +msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:17 +msgid "" +"Configuring and setting up the :code:`Dockerfile` as well the " +"configuration for the devcontainer can be a bit more involved. The good " +"thing is you want have to do it. Usually it should be enough to install " +"Docker on your system and ensure its available on your command line. " +"Additionally, install the `VSCode Containers Extension `_." +msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:19 +msgid "" +"Now you should be good to go. When starting VSCode, it will ask you to " +"run in the container environment and - if you confirm - automatically " +"build the container and use it. To manually instruct VSCode to use the " +"devcontainer, you can, after installing the extension, click the green " +"area in the bottom left corner of your VSCode window and select the " +"option *(Re)Open Folder in Container*." +msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:21 +msgid "" +"In some cases your setup might be more involved. 
For those cases consult " +"the following sources:" +msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:23 +msgid "" +"`Developing inside a Container " +"`_" +msgstr "" + +#: ../../source/contributor-how-to-develop-in-vscode-dev-containers.rst:24 +msgid "" +"`Remote development in Containers " +"`_" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:2 +msgid "Install development versions" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:5 +msgid "Install development versions of Flower" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:8 +msgid "Using Poetry (recommended)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:10 +msgid "" +"Install a ``flwr`` pre-release from PyPI: update the ``flwr`` dependency " +"in ``pyproject.toml`` and then reinstall (don't forget to delete " +"``poetry.lock`` (``rm poetry.lock``) before running ``poetry install``)." 
+msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:12 +msgid "" +"``flwr = { version = \"1.0.0a0\", allow-prereleases = true }`` (without " +"extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:13 +msgid "" +"``flwr = { version = \"1.0.0a0\", allow-prereleases = true, extras = " +"[\"simulation\"] }`` (with extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:15 +msgid "" +"Install ``flwr`` from a local copy of the Flower source code via " +"``pyproject.toml``:" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:17 +msgid "``flwr = { path = \"../../\", develop = true }`` (without extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:18 +msgid "" +"``flwr = { path = \"../../\", develop = true, extras = [\"simulation\"] " +"}`` (with extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:20 +msgid "Install ``flwr`` from a local wheel file via ``pyproject.toml``:" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:22 +msgid "" +"``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\" }`` (without" +" extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:23 +msgid "" +"``flwr = { path = \"../../dist/flwr-1.0.0-py3-none-any.whl\", extras = " +"[\"simulation\"] }`` (with extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:25 +msgid "" +"Please refer to the Poetry documentation for further details: `Poetry " +"Dependency Specification `_" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:28 +msgid "Using pip (recommended on Colab)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:30 +msgid "Install a ``flwr`` pre-release from PyPI:" +msgstr "" + +#: 
../../source/contributor-how-to-install-development-versions.rst:32 +msgid "``pip install -U --pre flwr`` (without extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:33 +msgid "``pip install -U --pre flwr[simulation]`` (with extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:35 +msgid "" +"Python packages can be installed from git repositories. Use one of the " +"following commands to install the Flower directly from GitHub." +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:37 +msgid "Install ``flwr`` from the default GitHub branch (``main``):" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:39 +msgid "" +"``pip install flwr@git+https://github.com/adap/flower.git`` (without " +"extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:40 +msgid "" +"``pip install flwr[simulation]@git+https://github.com/adap/flower.git`` " +"(with extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:42 +msgid "Install ``flwr`` from a specific GitHub branch (``branch-name``):" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:44 +msgid "" +"``pip install flwr@git+https://github.com/adap/flower.git@branch-name`` " +"(without extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:45 +msgid "" +"``pip install flwr[simulation]@git+https://github.com/adap/flower.git" +"@branch-name`` (with extras)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:49 +msgid "Open Jupyter Notebooks on Google Colab" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:51 +msgid "" +"Open the notebook ``doc/source/tutorial-get-started-with-flower-" +"pytorch.ipynb``:" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:53 +msgid "" 
+"https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-get-started-with-flower-pytorch.ipynb" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:55 +msgid "" +"Open a development version of the same notebook from branch `branch-name`" +" by changing ``main`` to ``branch-name`` (right after ``blob``):" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:57 +msgid "" +"https://colab.research.google.com/github/adap/flower/blob/branch-" +"name/doc/source/tutorial-get-started-with-flower-pytorch.ipynb" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:59 +msgid "Install a `whl` on Google Colab:" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:61 +msgid "" +"In the vertical icon grid on the left hand side, select ``Files`` > " +"``Upload to session storage``" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:62 +msgid "Upload the whl (e.g., ``flwr-1.6.0-py3-none-any.whl``)" +msgstr "" + +#: ../../source/contributor-how-to-install-development-versions.rst:63 +msgid "" +"Change ``!pip install -q 'flwr[simulation]' torch torchvision " +"matplotlib`` to ``!pip install -q 'flwr-1.6.0-py3-none-" +"any.whl[simulation]' torch torchvision matplotlib``" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:2 +msgid "Release Flower" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:4 +msgid "" +"This document describes the current release process. It may or may not " +"change in the future." +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:7 +msgid "Before the release" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:9 +msgid "" +"Update the changelog (``changelog.md``) with all relevant changes that " +"happened after the last release. 
If the last release was tagged " +"``v1.2.0``, you can use the following URL to see all commits that got " +"merged into ``main`` since then:" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:11 +msgid "" +"`GitHub: Compare v1.2.0...main " +"`_" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:13 +msgid "" +"Thank the authors who contributed since the last release. This can be " +"done by running the ``./dev/add-shortlog.sh`` convenience script (it can " +"be ran multiple times and will update the names in the list if new " +"contributors were added in the meantime)." +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:16 +msgid "During the release" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:18 +msgid "" +"The version number of a release is stated in ``pyproject.toml``. To " +"release a new version of Flower, the following things need to happen (in " +"that order):" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:20 +msgid "" +"Update the ``changelog.md`` section header ``Unreleased`` to contain the " +"version number and date for the release you are building. Create a pull " +"request with the change." +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:21 +msgid "" +"Tag the release commit with the version number as soon as the PR is " +"merged: ``git tag v0.12.3``, then ``git push --tags``. This will create a" +" draft release on GitHub containing the correct artifacts and the " +"relevant part of the changelog." +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:22 +msgid "Check the draft release on GitHub, and if everything is good, publish it." 
+msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:25 +msgid "After the release" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:27 +msgid "Create a pull request which contains the following changes:" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:29 +msgid "Increase the minor version in ``pyproject.toml`` by one." +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:30 +msgid "Update all files which contain the current version number if necessary." +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:31 +msgid "Add a new ``Unreleased`` section in ``changelog.md``." +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:33 +msgid "" +"Merge the pull request on the same day (i.e., before a new nightly release" +" gets published to PyPI)." +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:36 +msgid "Publishing a pre-release" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:39 +msgid "Pre-release naming" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:41 +msgid "" +"PyPI supports pre-releases (alpha, beta, release candidate). 
Pre-releases " +"MUST use one of the following naming patterns:" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:43 +msgid "Alpha: ``MAJOR.MINOR.PATCHaN``" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:44 +msgid "Beta: ``MAJOR.MINOR.PATCHbN``" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:45 +msgid "Release candidate (RC): ``MAJOR.MINOR.PATCHrcN``" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:47 +msgid "Examples include:" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:49 +msgid "``1.0.0a0``" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:50 +msgid "``1.0.0b0``" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:51 +msgid "``1.0.0rc0``" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:52 +msgid "``1.0.0rc1``" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:54 +msgid "" +"This is in line with PEP-440 and the recommendations from the Python " +"Packaging Authority (PyPA):" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:57 +msgid "`PEP-440 `_" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:58 +msgid "" +"`PyPA Choosing a versioning scheme " +"`_" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:60 +msgid "" +"Note that the approach defined by PyPA is not compatible with SemVer " +"2.0.0 spec, for details consult the `Semantic Versioning Specification " +"`_ (specifically item " +"11 on precedence)." +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:63 +msgid "Pre-release classification" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:65 +msgid "Should the next pre-release be called alpha, beta, or release candidate?" 
+msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:67 +msgid "" +"RC: feature complete, no known issues (apart from issues that are " +"classified as \"won't fix\" for the next stable release) - if no issues " +"surface this will become the next stable release" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:68 +msgid "Beta: feature complete, allowed to have known issues" +msgstr "" + +#: ../../source/contributor-how-to-release-flower.rst:69 +msgid "Alpha: not feature complete, allowed to have known issues" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:2 +msgid "Set up a virtual env" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:4 +msgid "" +"It is recommended to run your Python setup within a virtual environment. " +"This guide shows three different examples how to create a virtual " +"environment with pyenv virtualenv, poetry, or Anaconda. You can follow " +"the instructions or choose your preferred setup." +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:9 +msgid "Python Version" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:11 +#: ../../source/how-to-install-flower.rst:8 +msgid "" +"Flower requires at least `Python 3.8 `_, " +"but `Python 3.10 `_ or above is " +"recommended." +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:14 +msgid "Virutualenv with Pyenv/Virtualenv" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:16 +msgid "" +"One of the recommended virtual environment is `pyenv " +"`_/`virtualenv `_. Please see `Flower examples " +"`_ for details." 
+msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:18 +msgid "" +"Once Pyenv is set up, you can use it to install `Python Version 3.10 " +"`_ or above:" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:24 +msgid "Create the virtualenv with:" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:31 +msgid "Activate the virtualenv by running the following command:" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:39 +msgid "Virtualenv with Poetry" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:41 +msgid "" +"The Flower examples are based on `Poetry `_ to manage dependencies. After installing Poetry you " +"simply create a virtual environment with:" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:47 +msgid "" +"If you open a new terminal you can activate the previously created " +"virtual environment with the following command:" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:55 +msgid "Virtualenv with Anaconda" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:57 +msgid "" +"If you prefer to use Anaconda for your virtual environment then install " +"and setup the `conda `_ package. After setting it up you can " +"create a virtual environment with:" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:63 +msgid "and activate the virtual environment with:" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:71 +msgid "And then?" +msgstr "" + +#: ../../source/contributor-how-to-set-up-a-virtual-env.rst:73 +msgid "" +"As soon as you created your virtual environment you clone one of the " +"`Flower examples `_." 
+msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:2 +msgid "Write documentation" +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:6 +msgid "Project layout" +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:8 +msgid "" +"The Flower documentation lives in the ``doc`` directory. The Sphinx-based" +" documentation system supports both reStructuredText (``.rst`` files) and" +" Markdown (``.md`` files)." +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:10 +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:119 +msgid "" +"Note that, in order to build the documentation locally (with ``poetry run" +" make html``, like described below), `Pandoc " +"_` needs to be installed on the " +"system." +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:14 +msgid "Edit an existing page" +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:16 +msgid "Edit an existing ``.rst`` (or ``.md``) file under ``doc/source/``" +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:17 +#: ../../source/contributor-how-to-write-documentation.rst:27 +msgid "Compile the docs: ``cd doc``, then ``poetry run make html``" +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:18 +#: ../../source/contributor-how-to-write-documentation.rst:28 +msgid "Open ``doc/build/html/index.html`` in the browser to check the result" +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:22 +msgid "Create a new page" +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:24 +msgid "Add new ``.rst`` file under ``doc/source/``" +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:25 +msgid "Add content to the new ``.rst`` file" +msgstr "" + +#: ../../source/contributor-how-to-write-documentation.rst:26 +msgid "Link to the new rst from ``index.rst``" +msgstr "" + +#: 
../../source/contributor-ref-good-first-contributions.rst:2 +msgid "Good first contributions" +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:4 +msgid "" +"We welcome contributions to Flower! However, it is not always easy to " +"know where to start. We therefore put together a few recommendations on " +"where to start to increase your chances of getting your PR accepted into " +"the Flower codebase." +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:11 +msgid "Where to start" +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:13 +msgid "" +"Until the Flower core library matures it will be easier to get PR's " +"accepted if they only touch non-core areas of the codebase. Good " +"candidates to get started are:" +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:17 +msgid "Documentation: What's missing? What could be expressed more clearly?" +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:18 +msgid "Baselines: See below." +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:19 +msgid "Examples: See below." +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:23 +msgid "Request for Flower Baselines" +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:25 +msgid "" +"If you are not familiar with Flower Baselines, you should probably check-" +"out our `contributing guide for baselines `_." +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:27 +msgid "" +"You should then check out the open `issues " +"`_" +" for baseline requests. If you find a baseline that you'd like to work on" +" and that has no assignees, feel free to assign it to yourself and start " +"working on it!" 
+msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:31 +msgid "" +"Otherwise, if you don't find a baseline you'd like to work on, be sure to" +" open a new issue with the baseline request template!" +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:34 +msgid "Request for examples" +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:36 +msgid "" +"We wish we had more time to write usage examples because we believe they " +"help users to get started with building what they want to build. Here are" +" a few ideas where we'd be happy to accept a PR:" +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:40 +msgid "Llama 2 fine-tuning, with Hugging Face Transformers and PyTorch" +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:41 +msgid "XGBoost" +msgstr "" + +#: ../../source/contributor-ref-good-first-contributions.rst:42 +msgid "Android ONNX on-device training" +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:2 +msgid "Secure Aggregation Protocols" +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:4 +msgid "" +"Include SecAgg, SecAgg+, and LightSecAgg protocol. The LightSecAgg " +"protocol has not been implemented yet, so its diagram and abstraction may" +" not be accurate in practice. The SecAgg protocol can be considered as a " +"special case of the SecAgg+ protocol." +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:8 +msgid "The :code:`SecAgg+` abstraction" +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:10 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:161 +msgid "" +"In this implementation, each client will be assigned with a unique index " +"(int) for secure aggregation, and thus many python dictionaries used have" +" keys of int type rather than ClientProxy type." 
+msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:65 +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:198 +msgid "" +"The Flower server will execute and process received results in the " +"following order:" +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:159 +msgid "The :code:`LightSecAgg` abstraction" +msgstr "" + +#: ../../source/contributor-ref-secure-aggregation-protocols.rst:271 +msgid "Types" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:2 +msgid "Contribute on GitHub" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:4 +msgid "" +"This guide is for people who want to get involved with Flower, but who " +"are not used to contributing to GitHub projects." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:6 +msgid "" +"If you're familiar with how contributing on GitHub works, you can " +"directly checkout our `getting started guide for contributors " +"`_ and " +"examples of `good first contributions `_." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:12 +msgid "Setting up the repository" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:23 +msgid "**Create a GitHub account and setup Git**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:15 +msgid "" +"Git is a distributed version control tool. This allows for an entire " +"codebase's history to be stored and every developer's machine. It is a " +"software that will need to be installed on your local machine, you can " +"follow this `guide `_ to set it up." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:18 +msgid "" +"GitHub, itself, is a code hosting platform for version control and " +"collaboration. It allows for everyone to collaborate and work from " +"anywhere on remote repositories." 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:20 +msgid "" +"If you haven't already, you will need to create an account on `GitHub " +"`_." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:22 +msgid "" +"The idea behind the generic Git and GitHub workflow boils down to this: " +"you download code from a remote repository on GitHub, make changes " +"locally and keep track of them using Git and then you upload your new " +"history back to GitHub." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:34 +msgid "**Forking the Flower repository**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:26 +msgid "" +"A fork is a personal copy of a GitHub repository. To create one for " +"Flower, you must navigate to https://github.com/adap/flower (while " +"connected to your GitHub account) and click the ``Fork`` button situated " +"on the top right of the page." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:31 +msgid "" +"You can change the name if you want, but this is not necessary as this " +"version of Flower will be yours and will sit inside your own account " +"(i.e., in your own list of repositories). Once created, you should see on" +" the top left corner that you are looking at your own version of Flower." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:49 +msgid "**Cloning your forked repository**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:37 +msgid "" +"The next step is to download the forked repository on your machine to be " +"able to make changes to it. On your forked repository page, you should " +"first click on the ``Code`` button on the right, this will give you the " +"ability to copy the HTTPS link of the repository." 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:43 +msgid "" +"Once you copied the \\, you can open a terminal on your machine, " +"navigate to the place you want to download the repository to and type:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:49 +msgid "" +"This will create a `flower/` (or the name of your fork if you renamed it)" +" folder in the current working directory." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:68 +msgid "**Add origin**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:52 +msgid "You can then go into the repository folder:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:58 +msgid "" +"And here we will need to add an origin to our repository. The origin is " +"the \\ of the remote fork repository. To obtain it, we can do as " +"previously mentioned by going to our fork repository on our GitHub " +"account and copying the link." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:63 +msgid "" +"Once the \\ is copied, we can type the following command in our " +"terminal:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:92 +msgid "**Add upstream**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:71 +msgid "" +"Now we will add an upstream address to our repository. Still in the same " +"directory, we must run the following command:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:78 +msgid "The following diagram visually explains what we did in the previous steps:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:82 +msgid "" +"The upstream is the GitHub remote address of the parent repository (in " +"this case Flower), i.e. the one we eventually want to contribute to and " +"therefore need an up-to-date history of. 
The origin is just the GitHub " +"remote address of the forked repository we created, i.e. the copy (fork) " +"in our own account." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:86 +msgid "" +"To make sure our local version of the fork is up-to-date with the latest " +"changes from the Flower repository, we can execute the following command:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:95 +msgid "Setting up the coding environment" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:97 +msgid "" +"This can be achieved by following this `getting started guide for " +"contributors`_ (note that you won't need to clone the repository). Once " +"you are able to write code and test it, you can finally start making " +"changes!" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:102 +msgid "Making changes" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:104 +msgid "" +"Before making any changes make sure you are up-to-date with your " +"repository:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:110 +msgid "And with Flower's repository:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:124 +msgid "**Create a new branch**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:117 +msgid "" +"To make the history cleaner and easier to work with, it is good practice " +"to create a new branch for each feature/project that needs to be " +"implemented." 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:120 +msgid "" +"To do so, just run the following command inside the repository's " +"directory:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:127 +msgid "**Make changes**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:127 +msgid "Write great code and create wonderful changes using your favorite editor!" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:140 +msgid "**Test and format your code**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:130 +msgid "" +"Don't forget to test and format your code! Otherwise your code won't be " +"able to be merged into the Flower repository. This is done so the " +"codebase stays consistent and easy to understand." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:133 +msgid "To do so, we have written a few scripts that you can execute:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:152 +msgid "**Stage changes**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:143 +msgid "" +"Before creating a commit that will update your history, you must specify " +"to Git which files it needs to take into account." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:145 +msgid "This can be done with:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:151 +msgid "" +"To check which files have been modified compared to the last version " +"(last commit) and to see which files are staged for commit, you can use " +"the :code:`git status` command." 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:162 +msgid "**Commit changes**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:155 +msgid "" +"Once you have added all the files you wanted to commit using :code:`git " +"add`, you can finally create your commit using this command:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:161 +msgid "" +"The \\ is there to explain to others what the commit " +"does. It should be written in an imperative style and be concise. An " +"example would be :code:`git commit -m \"Add images to README\"`." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:173 +msgid "**Push the changes to the fork**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:165 +msgid "" +"Once we have committed our changes, we have effectively updated our local" +" history, but GitHub has no way of knowing this unless we push our " +"changes to our origin's remote address:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:172 +msgid "" +"Once this is done, you will see on the GitHub that your forked repo was " +"updated with the changes you have made." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:176 +msgid "Creating and merging a pull request (PR)" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:203 +msgid "**Create the PR**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:179 +msgid "" +"Once you have pushed changes, on the GitHub webpage of your repository " +"you should see the following message:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:183 +msgid "Otherwise you can always find this option in the `Branches` page." 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:185 +msgid "" +"Once you click the `Compare & pull request` button, you should see " +"something similar to this:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:189 +msgid "At the top you have an explanation of which branch will be merged where:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:193 +msgid "" +"In this example you can see that the request is to merge the branch " +"``doc-fixes`` from my forked repository to branch ``main`` from the " +"Flower repository." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:195 +msgid "" +"The input box in the middle is there for you to describe what your PR " +"does and to link it to existing issues. We have placed comments (that " +"won't be rendered once the PR is opened) to guide you through the " +"process." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:198 +msgid "" +"At the bottom you will find the button to open the PR. This will notify " +"reviewers that a new PR has been opened and that they should look over it" +" to merge or to request changes." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:201 +msgid "" +"If your PR is not yet ready for review, and you don't want to notify " +"anyone, you have the option to create a draft pull request:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +msgid "**Making new changes**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:206 +msgid "" +"Once the PR has been opened (as draft or not), you can still push new " +"commits to it the same way we did before, by making changes to the branch" +" associated with the PR." 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:228 +msgid "**Review the PR**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:209 +msgid "" +"Once the PR has been opened or once the draft PR has been marked as " +"ready, a review from code owners will be automatically requested:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:213 +msgid "" +"Code owners will then look into the code, ask questions, request changes " +"or validate the PR." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:215 +msgid "Merging will be blocked if there are ongoing requested changes." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:219 +msgid "" +"To resolve them, just push the necessary changes to the branch associated" +" with the PR:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:223 +msgid "And resolve the conversation:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:227 +msgid "" +"Once all the conversations have been resolved, you can re-request a " +"review." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:248 +msgid "**Once the PR is merged**" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:231 +msgid "" +"If all the automatic tests have passed and reviewers have no more changes" +" to request, they can approve the PR and merge it." 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:235 +msgid "" +"Once it is merged, you can delete the branch on GitHub (a button should " +"appear to do so) and also delete it locally by doing:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:242 +msgid "Then you should update your forked repository by doing:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:251 +msgid "Example of first contribution" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:254 +msgid "Problem" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:256 +msgid "" +"For our documentation, we’ve started to use the `Diàtaxis framework " +"`_." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:258 +msgid "" +"Our “How to” guides should have titles that continue the sencence “How to" +" …”, for example, “How to upgrade to Flower 1.0”." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:260 +msgid "" +"Most of our guides do not follow this new format yet, and changing their " +"title is (unfortunately) more involved than one might think." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:262 +msgid "" +"This issue is about changing the title of a doc from present continious " +"to present simple." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:264 +msgid "" +"Let's take the example of “Saving Progress” which we changed to “Save " +"Progress”. Does this pass our check?" 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:266 +msgid "Before: ”How to saving progress” ❌" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:268 +msgid "After: ”How to save progress” ✅" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:271 +msgid "Solution" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:273 +msgid "" +"This is a tiny change, but it’ll allow us to test your end-to-end setup. " +"After cloning and setting up the Flower repo, here’s what you should do:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:275 +msgid "Find the source file in `doc/source`" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:276 +msgid "" +"Make the change in the `.rst` file (beware, the dashes under the title " +"should be the same length as the title itself)" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:277 +msgid "" +"Build the docs and check the result: ``_" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:280 +msgid "Rename file" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:282 +msgid "" +"You might have noticed that the file name still reflects the old wording." +" If we just change the file, then we break all existing links to it - it " +"is **very important** to avoid that, breaking links can harm our search " +"engine ranking." 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:285 +msgid "Here’s how to change the file name:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:287 +msgid "Change the file name to `save-progress.rst`" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:288 +msgid "Add a redirect rule to `doc/source/conf.py`" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:290 +msgid "" +"This will cause a redirect from `saving-progress.html` to `save-" +"progress.html`, old links will continue to work." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:293 +msgid "Apply changes in the index file" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:295 +msgid "" +"For the lateral navigation bar to work properly, it is very important to " +"update the `index.rst` file as well. This is where we define the whole " +"arborescence of the navbar." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:298 +msgid "Find and modify the file name in `index.rst`" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:301 +msgid "Open PR" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:303 +msgid "" +"Commit the changes (commit messages are always imperative: “Do " +"something”, in this case “Change …”)" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:304 +msgid "Push the changes to your fork" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:305 +msgid "Open a PR (as shown above)" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:306 +msgid "Wait for it to be approved!" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:307 +msgid "Congrats! 🥳 You're now officially a Flower contributor!" 
+msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:311 +msgid "How to write a good PR title" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:313 +msgid "" +"A well-crafted PR title helps team members quickly understand the purpose" +" and scope of the changes being proposed. Here's a guide to help you " +"write a good GitHub PR title:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:315 +msgid "" +"1. Be Clear and Concise: Provide a clear summary of the changes in a " +"concise manner. 1. Use Actionable Verbs: Start with verbs like \"Add,\" " +"\"Update,\" or \"Fix\" to indicate the purpose. 1. Include Relevant " +"Information: Mention the affected feature or module for context. 1. Keep " +"it Short: Avoid lengthy titles for easy readability. 1. Use Proper " +"Capitalization and Punctuation: Follow grammar rules for clarity." +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:321 +msgid "" +"Let's start with a few examples for titles that should be avoided because" +" they do not provide meaningful information:" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:323 +msgid "Implement Algorithm" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:324 +msgid "Database" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:325 +msgid "Add my_new_file.py to codebase" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:326 +msgid "Improve code in module" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:327 +msgid "Change SomeModule" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:329 +msgid "" +"Here are a few positive examples which provide helpful information " +"without repeating how they do it, as that is already visible in the " +"\"Files changed\" section of the PR:" +msgstr "" + +#: 
../../source/contributor-tutorial-contribute-on-github.rst:331 +msgid "Update docs banner to mention Flower Summit 2023" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:332 +msgid "Remove unnecessary XGBoost dependency" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:333 +msgid "Remove redundant attributes in strategies subclassing FedAvg" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:334 +msgid "Add CI job to deploy the staging system when the `main` branch changes" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:335 +msgid "" +"Add new amazing library which will be used to improve the simulation " +"engine" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:339 +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:548 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:946 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:747 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:713 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:367 +msgid "Next steps" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:341 +msgid "" +"Once you have made your first PR, and want to contribute more, be sure to" +" check out the following :" +msgstr "" + +#: ../../source/contributor-tutorial-contribute-on-github.rst:343 +msgid "" +"`Good first contributions `_, where you should particularly look " +"into the :code:`baselines` contributions." 
+msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:2 +msgid "Get started as a contributor" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 +msgid "Prerequisites" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:7 +msgid "`Python 3.7 `_ or above" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:8 +msgid "`Poetry 1.3 `_ or above" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:9 +msgid "(Optional) `pyenv `_" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:10 +msgid "(Optional) `pyenv-virtualenv `_" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:12 +msgid "" +"Flower uses :code:`pyproject.toml` to manage dependencies and configure " +"development tools (the ones which support it). Poetry is a build tool " +"which supports `PEP 517 `_." +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:18 +msgid "Developer Machine Setup" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:20 +msgid "" +"First, clone the `Flower repository `_ " +"from GitHub::" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:26 +msgid "" +"Second, create a virtual environment (and activate it). 
If you chose to " +"use :code:`pyenv` (with the :code:`pyenv-virtualenv` plugin) and already " +"have it installed , you can use the following convenience script (by " +"default it will use :code:`Python 3.8.17`, but you can change it by " +"providing a specific :code:``)::" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:33 +msgid "" +"If you don't have :code:`pyenv` installed, you can use the following " +"script that will install pyenv, set it up and create the virtual " +"environment (with :code:`Python 3.8.17` by default)::" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:39 +msgid "" +"Third, install the Flower package in development mode (think :code:`pip " +"install -e`) along with all necessary dependencies::" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:46 +msgid "Convenience Scripts" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:48 +msgid "" +"The Flower repository contains a number of convenience scripts to make " +"recurring development tasks easier and less error-prone. See the " +":code:`/dev` subdirectory for a full list. 
The following scripts are "
+"amongst the most important ones:"
+msgstr ""
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:54
+msgid "Create/Delete Virtual Environment"
+msgstr ""
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:62
+msgid "Compile ProtoBuf Definitions"
+msgstr ""
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:69
+msgid "Auto-Format Code"
+msgstr ""
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:76
+msgid "Run Linters and Tests"
+msgstr ""
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:83
+msgid "Run Github Actions (CI) locally"
+msgstr ""
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:85
+msgid ""
+"Developers could run the full set of Github Actions workflows under their"
+" local environment by using `Act _`. "
+"Please refer to the installation instructions under the linked repository"
+" and run the next command under Flower main cloned repository folder::"
+msgstr ""
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:92
+msgid ""
+"The Flower default workflow would run by setting up the required Docker "
+"machines underneath."
+msgstr ""
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:97
+msgid "Build Release"
+msgstr ""
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:99
+msgid ""
+"Flower uses Poetry to build releases. The necessary command is wrapped in"
+" a simple script::"
+msgstr ""
+
+#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:104
+msgid ""
+"The resulting :code:`.whl` and :code:`.tar.gz` releases will be stored in"
+" the :code:`/dist` subdirectory."
+msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:109 +msgid "Build Documentation" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:111 +msgid "" +"Flower's documentation uses `Sphinx `_. " +"There's no convenience script to re-build the documentation yet, but it's" +" pretty easy::" +msgstr "" + +#: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:117 +msgid "This will generate HTML documentation in ``doc/build/html``." +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: FedBN in PyTorch - From Centralized To Federated" +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:4 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload with `FedBN " +"`_, a federated training strategy " +"designed for non-iid data. We are using PyTorch to train a Convolutional " +"Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset. " +"When applying FedBN, only few changes needed compared to `Example: " +"PyTorch - From Centralized To Federated `_." +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:9 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:10 +msgid "Centralized Training" +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:10 +msgid "" +"All files are revised based on `Example: PyTorch - From Centralized To " +"Federated `_. The only thing to do is modifying the file called " +":code:`cifar.py`, revised part is shown below:" +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:13 +msgid "" +"The model architecture defined in class Net() is added with Batch " +"Normalization layers accordingly." 
+msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:41 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:157 +msgid "You can now run your machine learning workload:" +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:47 +msgid "" +"So far this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"federated learning system within FedBN, the sytstem consists of one " +"server and two clients." +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:51 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:167 +msgid "Federated Training" +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:53 +msgid "" +"If you have read `Example: PyTorch - From Centralized To Federated " +"`_, the following parts are easy to follow, onyl " +":code:`get_parameters` and :code:`set_parameters` function in " +":code:`client.py` needed to revise. If not, please read the `Example: " +"PyTorch - From Centralized To Federated `_. first." +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:56 +msgid "" +"Our example consists of one *server* and two *clients*. In FedBN, " +":code:`server.py` keeps unchanged, we can start the server directly." +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:62 +msgid "" +"Finally, we will revise our *client* logic by changing " +":code:`get_parameters` and :code:`set_parameters` in :code:`client.py`, " +"we will exclude batch normalization parameters from model parameter list " +"when sending to or receiving from the server." 
+msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:85 +msgid "Now, you can now open two additional terminal windows and run" +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:91 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your (previously centralized) PyTorch project run federated " +"learning with FedBN strategy across two clients. Congratulations!" +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:94 +#: ../../source/example-jax-from-centralized-to-federated.rst:277 +#: ../../source/example-mxnet-walk-through.rst:356 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:310 +#: ../../source/tutorial-quickstart-jax.rst:283 +msgid "Next Steps" +msgstr "" + +#: ../../source/example-fedbn-pytorch-from-centralized-to-federated.rst:96 +msgid "" +"The full source code for this example can be found `here " +"`_. Our example is of course somewhat over-" +"simplified because both clients load the exact same dataset, which isn't " +"realistic. You're now prepared to explore this topic further. How about " +"using different subsets of CIFAR-10 on each client? How about adding more" +" clients?" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:2 +msgid "Example: JAX - Run JAX Federated" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:4 +#: ../../source/tutorial-quickstart-jax.rst:10 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing JAX workload. We are using JAX to train a linear " +"regression model on a scikit-learn dataset. We will structure the example" +" similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. First, we build a centralized " +"training approach based on the `Linear Regression with JAX " +"`_" +" tutorial`. 
Then, we build upon the centralized training code to run the "
+"training in a federated fashion."
+msgstr ""
+
+#: ../../source/example-jax-from-centralized-to-federated.rst:10
+#: ../../source/tutorial-quickstart-jax.rst:16
+msgid ""
+"Before we start building our JAX example, we need to install the packages "
+":code:`jax`, :code:`jaxlib`, :code:`scikit-learn`, and :code:`flwr`:"
+msgstr ""
+
+#: ../../source/example-jax-from-centralized-to-federated.rst:18
+#: ../../source/tutorial-quickstart-jax.rst:24
+msgid "Linear Regression with JAX"
+msgstr ""
+
+#: ../../source/example-jax-from-centralized-to-federated.rst:20
+#: ../../source/tutorial-quickstart-jax.rst:26
+msgid ""
+"We begin with a brief description of the centralized training code based "
+"on a :code:`Linear Regression` model. If you want a more in-depth "
+"explanation of what's going on then have a look at the official `JAX "
+"documentation `_."
+msgstr ""
+
+#: ../../source/example-jax-from-centralized-to-federated.rst:23
+#: ../../source/tutorial-quickstart-jax.rst:29
+msgid ""
+"Let's create a new file called :code:`jax_training.py` with all the "
+"components required for a traditional (centralized) linear regression "
+"training. First, the JAX packages :code:`jax` and :code:`jaxlib` need to "
+"be imported. In addition, we need to import :code:`sklearn` since we use "
+":code:`make_regression` for the dataset and :code:`train_test_split` to "
+"split the dataset into a training and test set. You can see that we do "
+"not yet import the :code:`flwr` package for federated learning. This will"
+" be done later."
+msgstr ""
+
+#: ../../source/example-jax-from-centralized-to-federated.rst:37
+#: ../../source/tutorial-quickstart-jax.rst:43
+msgid ""
+"The :code:`load_data()` function loads the mentioned training and test "
+"sets."
+msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:47 +#: ../../source/tutorial-quickstart-jax.rst:53 +msgid "" +"The model architecture (a very simple :code:`Linear Regression` model) is" +" defined in :code:`load_model()`." +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:59 +#: ../../source/tutorial-quickstart-jax.rst:65 +msgid "" +"We now need to define the training (function :code:`train()`), which " +"loops over the training set and measures the loss (function " +":code:`loss_fn()`) for each batch of training examples. The loss function" +" is separate since JAX takes derivatives with a :code:`grad()` function " +"(defined in the :code:`main()` function and called in :code:`train()`)." +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:77 +#: ../../source/tutorial-quickstart-jax.rst:83 +msgid "" +"The evaluation of the model is defined in the function " +":code:`evaluation()`. The function takes all test examples and measures " +"the loss of the linear regression model." +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:88 +#: ../../source/tutorial-quickstart-jax.rst:94 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model using JAX. " +"As already mentioned, the :code:`jax.grad()` function is defined in " +":code:`main()` and passed to :code:`train()`." +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:105 +#: ../../source/tutorial-quickstart-jax.rst:111 +msgid "You can now run your (centralized) JAX linear regression workload:" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:111 +#: ../../source/tutorial-quickstart-jax.rst:117 +msgid "" +"So far this should all look fairly familiar if you've used JAX before. 
" +"Let's take the next step and use what we've built to create a simple " +"federated learning system consisting of one server and two clients." +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:115 +#: ../../source/tutorial-quickstart-jax.rst:121 +msgid "JAX meets Flower" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:117 +#: ../../source/tutorial-quickstart-jax.rst:123 +msgid "" +"The concept of federating an existing workload is always the same and " +"easy to understand. We have to start a *server* and then use the code in " +":code:`jax_training.py` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server*, which averages all received " +"parameter updates. This describes one round of the federated learning " +"process, and we repeat this for multiple rounds." +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:123 +#: ../../source/example-mxnet-walk-through.rst:204 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:181 +#: ../../source/tutorial-quickstart-jax.rst:129 +msgid "" +"Our example consists of one *server* and two *clients*. Let's set up " +":code:`server.py` first. The *server* needs to import the Flower package " +":code:`flwr`. Next, we use the :code:`start_server` function to start a " +"server and tell it to perform three rounds of federated learning." 
+msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:133 +#: ../../source/example-mxnet-walk-through.rst:214 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:191 +#: ../../source/tutorial-quickstart-jax.rst:139 +msgid "We can already start the *server*:" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:139 +#: ../../source/tutorial-quickstart-jax.rst:145 +msgid "" +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined JAX training in :code:`jax_training.py`. Our" +" *client* needs to import :code:`flwr`, but also :code:`jax` and " +":code:`jaxlib` to update the parameters on our JAX model:" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:154 +#: ../../source/tutorial-quickstart-jax.rst:160 +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`FlowerClient`. :code:`NumPyClient` is slightly " +"easier to implement than :code:`Client` if you use a framework with good " +"NumPy interoperability (like JAX) because it avoids some of the " +"boilerplate that would otherwise be necessary. 
:code:`FlowerClient` needs" +" to implement four methods, two methods for getting/setting model " +"parameters, one method for training the model, and one method for testing" +" the model:" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:161 +#: ../../source/example-mxnet-walk-through.rst:242 +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid ":code:`set_parameters (optional)`" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:160 +#: ../../source/example-mxnet-walk-through.rst:241 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 +#: ../../source/tutorial-quickstart-jax.rst:166 +msgid "" +"set the model parameters on the local model that are received from the " +"server" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:161 +#: ../../source/tutorial-quickstart-jax.rst:167 +msgid "transform parameters to NumPy :code:`ndarray`'s" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:162 +#: ../../source/example-mxnet-walk-through.rst:243 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:220 +#: ../../source/tutorial-quickstart-jax.rst:168 +msgid "" +"loop over the list of model parameters received as NumPy " +":code:`ndarray`'s (think list of neural network layers)" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:163 +#: ../../source/example-mxnet-walk-through.rst:244 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:221 +#: ../../source/tutorial-quickstart-jax.rst:169 +#: ../../source/tutorial-quickstart-mxnet.rst:169 +#: ../../source/tutorial-quickstart-pytorch.rst:155 +#: ../../source/tutorial-quickstart-scikitlearn.rst:108 +msgid ":code:`get_parameters`" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:164 +#: ../../source/example-mxnet-walk-through.rst:245 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:222 +#: 
../../source/tutorial-quickstart-jax.rst:170 +msgid "" +"get the model parameters and return them as a list of NumPy " +":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:167 +#: ../../source/example-mxnet-walk-through.rst:248 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/tutorial-quickstart-mxnet.rst:175 +#: ../../source/tutorial-quickstart-pytorch.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:115 +msgid ":code:`fit`" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:166 +#: ../../source/example-jax-from-centralized-to-federated.rst:170 +#: ../../source/example-mxnet-walk-through.rst:247 +#: ../../source/example-mxnet-walk-through.rst:251 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:224 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:228 +#: ../../source/tutorial-quickstart-jax.rst:172 +#: ../../source/tutorial-quickstart-jax.rst:176 +msgid "" +"update the parameters of the local model with the parameters received " +"from the server" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:167 +#: ../../source/example-mxnet-walk-through.rst:248 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +msgid "train the model on the local training set" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:168 +#: ../../source/tutorial-quickstart-jax.rst:174 +msgid "get the updated local model parameters and return them to the server" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:172 +#: ../../source/example-mxnet-walk-through.rst:253 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/tutorial-quickstart-jax.rst:178 +#: 
../../source/tutorial-quickstart-mxnet.rst:178 +#: ../../source/tutorial-quickstart-pytorch.rst:164 +#: ../../source/tutorial-quickstart-scikitlearn.rst:118 +msgid ":code:`evaluate`" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:171 +#: ../../source/example-mxnet-walk-through.rst:252 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:229 +#: ../../source/tutorial-quickstart-jax.rst:177 +msgid "evaluate the updated model on the local test set" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:172 +#: ../../source/tutorial-quickstart-jax.rst:178 +msgid "return the local loss to the server" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:174 +#: ../../source/tutorial-quickstart-jax.rst:180 +msgid "" +"The challenging part is to transform the JAX model parameters from " +":code:`DeviceArray` to :code:`NumPy ndarray` to make them compatible with" +" `NumPyClient`." +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:176 +#: ../../source/tutorial-quickstart-jax.rst:182 +msgid "" +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`evaluate()` previously " +"defined in :code:`jax_training.py`. So what we really do here is we tell " +"Flower through our :code:`NumPyClient` subclass which of our already " +"defined functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:245 +#: ../../source/tutorial-quickstart-jax.rst:251 +msgid "Having defined the federation process, we can run it." 
+msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:268 +#: ../../source/example-mxnet-walk-through.rst:347 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:301 +#: ../../source/tutorial-quickstart-jax.rst:274 +msgid "And that's it. You can now open two additional terminal windows and run" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:274 +#: ../../source/tutorial-quickstart-jax.rst:280 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your JAX project run federated learning across two clients. " +"Congratulations!" +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:279 +#: ../../source/tutorial-quickstart-jax.rst:285 +msgid "" +"The source code of this example was improved over time and can be found " +"here: `Quickstart JAX `_. Our example is somewhat over-simplified because both " +"clients load the same dataset." +msgstr "" + +#: ../../source/example-jax-from-centralized-to-federated.rst:282 +#: ../../source/tutorial-quickstart-jax.rst:288 +msgid "" +"You're now prepared to explore this topic further. How about using a more" +" sophisticated model or using a different dataset? How about adding more " +"clients?" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:2 +msgid "Example: MXNet - Run MXNet Federated" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:4 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing MXNet workload. We are using MXNet to train a " +"Sequential model on the MNIST dataset. We will structure the example " +"similar to our `PyTorch - From Centralized To Federated " +"`_ walkthrough. MXNet and PyTorch are very " +"similar and a very good comparison between MXNet and PyTorch is given " +"`here `_. First, we build a centralized " +"training approach based on the `Handwritten Digit Recognition " +"`_" +" tutorial. 
Then, we build upon the centralized training code to run the " +"training in a federated fashion." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:10 +msgid "" +"Before we start setting up our MXNet example, we install the " +":code:`mxnet` and :code:`flwr` packages:" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:19 +msgid "MNIST Training with MXNet" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:21 +msgid "" +"We begin with a brief description of the centralized training code based " +"on a :code:`Sequential` model. If you want a more in-depth explanation of" +" what's going on then have a look at the official `MXNet tutorial " +"`_." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:24 +msgid "" +"Let's create a new file called:code:`mxnet_mnist.py` with all the " +"components required for a traditional (centralized) MNIST training. " +"First, the MXNet package :code:`mxnet` needs to be imported. You can see " +"that we do not yet import the :code:`flwr` package for federated " +"learning. This will be done later." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:42 +msgid "The :code:`load_data()` function loads the MNIST training and test sets." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:57 +msgid "" +"As already mentioned, we will use the MNIST dataset for this machine " +"learning workload. The model architecture (a very simple " +":code:`Sequential` model) is defined in :code:`model()`." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:70 +msgid "" +"We now need to define the training (function :code:`train()`) which loops" +" over the training set and measures the loss for each batch of training " +"examples." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:123 +msgid "" +"The evaluation of the model is defined in function :code:`test()`. 
The " +"function loops over all test samples and measures the loss and accuracy " +"of the model based on the test dataset." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:158 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our model on MNIST. " +"Note that the GPU/CPU device for the training and testing is defined " +"within the :code:`ctx` (context)." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:184 +msgid "You can now run your (centralized) MXNet machine learning workload:" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:190 +msgid "" +"So far this should all look fairly familiar if you've used MXNet (or even" +" PyTorch) before. Let's take the next step and use what we've built to " +"create a simple federated learning system consisting of one server and " +"two clients." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:194 +msgid "MXNet meets Flower" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:196 +msgid "" +"So far, it was not easily possible to use MXNet workloads for federated " +"learning because federated learning is not supported in MXNet. Since " +"Flower is fully agnostic towards the underlying machine learning " +"framework, it can be used to federated arbitrary machine learning " +"workloads. This section will show you how Flower can be used to federate " +"our centralized MXNet workload." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:198 +msgid "" +"The concept to federate an existing workload is always the same and easy " +"to understand. We have to start a *server* and then use the code in " +":code:`mxnet_mnist.py` for the *clients* that are connected to the " +"*server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. 
The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:220 +msgid "" +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined MXNet training in :code:`mxnet_mnist.py`. " +"Our *client* needs to import :code:`flwr`, but also :code:`mxnet` to " +"update the parameters on our MXNet model:" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:235 +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`MNISTClient`. :code:`NumPyClient` is slightly easier " +"to implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like PyTorch or MXNet) because it avoids some of the " +"boilerplate that would otherwise be necessary. 
:code:`MNISTClient` needs " +"to implement four methods, two methods for getting/setting model " +"parameters, one method for training the model, and one method for testing" +" the model:" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:242 +msgid "transform MXNet :code:`NDArray`'s to NumPy :code:`ndarray`'s" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:249 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:226 +msgid "get the updated local model weights and return them to the server" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:253 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +msgid "return the local loss and accuracy to the server" +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:255 +msgid "" +"The challenging part is to transform the MXNet parameters from " +":code:`NDArray` to :code:`NumPy Arrays` to make it readable for Flower." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:257 +msgid "" +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`test()` previously " +"defined in :code:`mxnet_mnist.py`. So what we really do here is we tell " +"Flower through our :code:`NumPyClient` subclass which of our already " +"defined functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:319 +msgid "" +"Having defined data loading, model architecture, training, and evaluation" +" we can put everything together and train our :code:`Sequential` model on" +" MNIST." +msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:353 +msgid "" +"in each window (make sure that the server is still running before you do " +"so) and see your MXNet project run federated learning across two clients." +" Congratulations!" 
+msgstr "" + +#: ../../source/example-mxnet-walk-through.rst:358 +msgid "" +"The full source code for this example: `MXNet: From Centralized To " +"Federated (Code) `_. Our example is of course " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using a CNN or using a different dataset? How about " +"adding more clients?" +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:2 +msgid "Example: PyTorch - From Centralized To Federated" +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:4 +msgid "" +"This tutorial will show you how to use Flower to build a federated " +"version of an existing machine learning workload. We are using PyTorch to" +" train a Convolutional Neural Network on the CIFAR-10 dataset. First, we " +"introduce this machine learning task with a centralized training approach" +" based on the `Deep Learning with PyTorch " +"`_ " +"tutorial. Then, we build upon the centralized training code to run the " +"training in a federated fashion." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:12 +msgid "" +"We begin with a brief description of the centralized CNN training code. " +"If you want a more in-depth explanation of what's going on then have a " +"look at the official `PyTorch tutorial " +"`_." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:15 +msgid "" +"Let's create a new file called :code:`cifar.py` with all the components " +"required for a traditional (centralized) training on CIFAR-10. First, all" +" required packages (such as :code:`torch` and :code:`torchvision`) need " +"to be imported. You can see that we do not import any package for " +"federated learning. You can keep all these imports as they are even when " +"we add the federated learning components at a later point." 
+msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:32 +msgid "" +"As already mentioned we will use the CIFAR-10 dataset for this machine " +"learning workload. The model architecture (a very simple Convolutional " +"Neural Network) is defined in :code:`class Net()`." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:56 +msgid "" +"The :code:`load_data()` function loads the CIFAR-10 training and test " +"sets. The :code:`transform` normalized the data after loading." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:74 +msgid "" +"We now need to define the training (function :code:`train()`) which loops" +" over the training set, measures the loss, backpropagates it, and then " +"takes one optimizer step for each batch of training examples." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:76 +msgid "" +"The evaluation of the model is defined in the function :code:`test()`. " +"The function loops over all test samples and measures the loss of the " +"model based on the test dataset." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:136 +msgid "" +"Having defined the data loading, model architecture, training, and " +"evaluation we can put everything together and train our CNN on CIFAR-10." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:163 +msgid "" +"So far, this should all look fairly familiar if you've used PyTorch " +"before. Let's take the next step and use what we've built to create a " +"simple federated learning system consisting of one server and two " +"clients." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:169 +msgid "" +"The simple machine learning project discussed in the previous section " +"trains the model on a single dataset (CIFAR-10), we call this centralized" +" learning. 
This concept of centralized learning, as shown in the previous" +" section, is probably known to most of you, and many of you have used it " +"previously. Normally, if you'd want to run machine learning workloads in " +"a federated fashion, then you'd have to change most of your code and set " +"everything up from scratch. This can be a considerable effort." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:173 +msgid "" +"However, with Flower you can evolve your pre-existing code into a " +"federated learning setup without the need for a major rewrite." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:175 +msgid "" +"The concept is easy to understand. We have to start a *server* and then " +"use the code in :code:`cifar.py` for the *clients* that are connected to " +"the *server*. The *server* sends model parameters to the clients. The " +"*clients* run the training and update the parameters. The updated " +"parameters are sent back to the *server* which averages all received " +"parameter updates. This describes one round of the federated learning " +"process and we repeat this for multiple rounds." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:197 +msgid "" +"Finally, we will define our *client* logic in :code:`client.py` and build" +" upon the previously defined centralized training in :code:`cifar.py`. " +"Our *client* needs to import :code:`flwr`, but also :code:`torch` to " +"update the parameters on our PyTorch model:" +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:213 +msgid "" +"Implementing a Flower *client* basically means implementing a subclass of" +" either :code:`flwr.client.Client` or :code:`flwr.client.NumPyClient`. " +"Our implementation will be based on :code:`flwr.client.NumPyClient` and " +"we'll call it :code:`CifarClient`. 
:code:`NumPyClient` is slightly easier " +"to implement than :code:`Client` if you use a framework with good NumPy " +"interoperability (like PyTorch or TensorFlow/Keras) because it avoids " +"some of the boilerplate that would otherwise be necessary. " +":code:`CifarClient` needs to implement four methods, two methods for " +"getting/setting model parameters, one method for training the model, and " +"one method for testing the model:" +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 +msgid ":code:`set_parameters`" +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:232 +msgid "" +"The two :code:`NumPyClient` methods :code:`fit` and :code:`evaluate` make" +" use of the functions :code:`train()` and :code:`test()` previously " +"defined in :code:`cifar.py`. So what we really do here is we tell Flower " +"through our :code:`NumPyClient` subclass which of our already defined " +"functions to call for training and evaluation. We included type " +"annotations to give you a better understanding of the data types that get" +" passed around." +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:280 +msgid "" +"All that's left to do is to define a function that loads both model and " +"data, creates a :code:`CifarClient`, and starts this client. You load " +"your data and model by using :code:`cifar.py`. Start :code:`CifarClient` " +"with the function :code:`fl.client.start_numpy_client()` by pointing it " +"at the same IP address we used in :code:`server.py`:" +msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:307 +msgid "" +"in each window (make sure that the server is running before you do so) " +"and see your (previously centralized) PyTorch project run federated " +"learning across two clients. Congratulations!" 
+msgstr "" + +#: ../../source/example-pytorch-from-centralized-to-federated.rst:312 +msgid "" +"The full source code for this example: `PyTorch: From Centralized To " +"Federated (Code) `_. Our example is, of course, " +"somewhat over-simplified because both clients load the exact same " +"dataset, which isn't realistic. You're now prepared to explore this topic" +" further. How about using different subsets of CIFAR-10 on each client? " +"How about adding more clients?" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:2 +msgid "Example: Walk-Through PyTorch & MNIST" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:4 +msgid "" +"In this tutorial we will learn, how to train a Convolutional Neural " +"Network on MNIST using Flower and PyTorch." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:6 +#: ../../source/tutorial-quickstart-mxnet.rst:14 +#: ../../source/tutorial-quickstart-pytorch.rst:17 +#: ../../source/tutorial-quickstart-scikitlearn.rst:14 +msgid "" +"Our example consists of one *server* and two *clients* all having the " +"same model." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:8 +#: ../../source/tutorial-quickstart-pytorch.rst:19 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:12 +#: ../../source/tutorial-quickstart-pytorch.rst:23 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. 
You can do this by running :" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:18 +msgid "" +"Since we want to use PyTorch to solve a computer vision task, let's go " +"ahead an install PyTorch and the **torchvision** library:" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:26 +msgid "Ready... Set... Train!" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:28 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. Our training " +"procedure and network architecture are based on PyTorch's `Basic MNIST " +"Example `_. This " +"will allow you see how easy it is to wrap your code with Flower and begin" +" training in a federated way. We provide you with two helper scripts, " +"namely *run-server.sh*, and *run-clients.sh*. Don't be afraid to look " +"inside, they are simple enough =)." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:31 +msgid "" +"Go ahead and launch on a terminal the *run-server.sh* script first as " +"follows:" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:38 +msgid "Now that the server is up and running, go ahead and launch the clients." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:45 +msgid "" +"Et voilà! You should be seeing the training procedure and, after a few " +"iterations, the test accuracy for each client." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:66 +msgid "Now, let's see what is really happening inside." 
+msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:69 +#: ../../source/tutorial-quickstart-ios.rst:129 +#: ../../source/tutorial-quickstart-mxnet.rst:224 +#: ../../source/tutorial-quickstart-pytorch.rst:203 +#: ../../source/tutorial-quickstart-scikitlearn.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:98 +#: ../../source/tutorial-quickstart-xgboost.rst:306 +msgid "Flower Server" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:71 +msgid "" +"Inside the server helper script *run-server.sh* you will find the " +"following code that basically runs the :code:`server.py`" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:78 +msgid "" +"We can go a bit deeper and see that :code:`server.py` simply launches a " +"server that will coordinate three rounds of training. Flower Servers are " +"very customizable, but for simple workloads, we can start a server using " +"the :ref:`start_server ` function and " +"leave all the configuration possibilities at their default values, as " +"seen below." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:89 +#: ../../source/tutorial-quickstart-ios.rst:34 +#: ../../source/tutorial-quickstart-mxnet.rst:34 +#: ../../source/tutorial-quickstart-pytorch.rst:37 +#: ../../source/tutorial-quickstart-scikitlearn.rst:40 +#: ../../source/tutorial-quickstart-tensorflow.rst:29 +#: ../../source/tutorial-quickstart-xgboost.rst:52 +msgid "Flower Client" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:91 +msgid "" +"Next, let's take a look at the *run-clients.sh* file. You will see that " +"it contains the main loop that starts a set of *clients*." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:100 +msgid "" +"**cid**: is the client ID. It is an integer that uniquely identifies " +"client identifier." 
+msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:101 +msgid "**server_address**: String that identifies IP and port of the server." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:102 +msgid "" +"**nb_clients**: This defines the number of clients being created. This " +"piece of information is not required by the client, but it helps us " +"partition the original MNIST dataset to make sure that every client is " +"working on unique subsets of both *training* and *test* sets." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:104 +msgid "" +"Again, we can go deeper and look inside :code:`flwr_example/quickstart-" +"pytorch/client.py`. After going through the argument parsing code at the " +"beginning of our :code:`main` function, you will find a call to " +":code:`mnist.load_data`. This function is responsible for partitioning " +"the original MNIST datasets (*training* and *test*) and returning a " +":code:`torch.utils.data.DataLoader` s for each of them. We then " +"instantiate a :code:`PytorchMNISTClient` object with our client ID, our " +"DataLoaders, the number of epochs in each round, and which device we want" +" to use for training (CPU or GPU)." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:119 +msgid "" +"The :code:`PytorchMNISTClient` object when finally passed to " +":code:`fl.client.start_client` along with the server's address as the " +"training process begins." 
+msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:123 +msgid "A Closer Look" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:125 +msgid "" +"Now, let's look closely into the :code:`PytorchMNISTClient` inside " +":code:`flwr_example.quickstart-pytorch.mnist` and see what it is doing:" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:226 +msgid "" +"The first thing to notice is that :code:`PytorchMNISTClient` instantiates" +" a CNN model inside its constructor" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:244 +msgid "" +"The code for the CNN is available under :code:`quickstart-pytorch.mnist` " +"and it is reproduced below. It is the same network found in `Basic MNIST " +"Example `_." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:290 +msgid "" +"The second thing to notice is that :code:`PytorchMNISTClient` class " +"inherits from the :code:`fl.client.Client`, and hence it must implement " +"the following methods:" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:315 +msgid "" +"When comparing the abstract class to its derived class " +":code:`PytorchMNISTClient` you will notice that :code:`fit` calls a " +":code:`train` function and that :code:`evaluate` calls a :code:`test`: " +"function." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:317 +msgid "" +"These functions can both be found inside the same :code:`quickstart-" +"pytorch.mnist` module:" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:437 +msgid "" +"Observe that these functions encapsulate regular training and test loops " +"and provide :code:`fit` and :code:`evaluate` with final statistics for " +"each round. You could substitute them with your custom train and test " +"loops and change the network architecture, and the entire example would " +"still work flawlessly. 
As a matter of fact, why not try and modify the " +"code to an example of your liking?" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:444 +msgid "Give It a Try" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:445 +msgid "" +"Looking through the quickstart code description above will have given a " +"good understanding of how *clients* and *servers* work in Flower, how to " +"run a simple experiment, and the internals of a client wrapper. Here are " +"a few things you could try on your own and get more experience with " +"Flower:" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:448 +msgid "" +"Try and change :code:`PytorchMNISTClient` so it can accept different " +"architectures." +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:449 +msgid "Modify the :code:`train` function so that it accepts different optimizers" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:450 +msgid "" +"Modify the :code:`test` function so that it proves not only the top-1 " +"(regular accuracy) but also the top-5 accuracy?" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:451 +msgid "" +"Go larger! Try to adapt the code to larger images and datasets. Why not " +"try training on ImageNet with a ResNet-50?" +msgstr "" + +#: ../../source/example-walkthrough-pytorch-mnist.rst:453 +msgid "You are ready now. Enjoy learning in a federated way!" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:2 +msgid "Differential privacy" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:4 +msgid "" +"Flower provides differential privacy (DP) wrapper classes for the easy " +"integration of the central DP guarantees provided by DP-FedAvg into " +"training pipelines defined in any of the various ML frameworks that " +"Flower is compatible with." 
+msgstr "" + +#: ../../source/explanation-differential-privacy.rst:7 +msgid "" +"Please note that these components are still experimental, the correct " +"configuration of DP for a specific task is still an unsolved problem." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:10 +msgid "" +"The name DP-FedAvg is misleading since it can be applied on top of any FL" +" algorithm that conforms to the general structure prescribed by the " +"FedOpt family of algorithms." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:13 +msgid "DP-FedAvg" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:15 +msgid "" +"DP-FedAvg, originally proposed by McMahan et al. [mcmahan]_ and extended " +"by Andrew et al. [andrew]_, is essentially FedAvg with the following " +"modifications." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:17 +msgid "" +"**Clipping** : The influence of each client's update is bounded by " +"clipping it. This is achieved by enforcing a cap on the L2 norm of the " +"update, scaling it down if needed." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:18 +msgid "" +"**Noising** : Gaussian noise, calibrated to the clipping threshold, is " +"added to the average computed at the server." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:20 +msgid "" +"The distribution of the update norm has been shown to vary from task-to-" +"task and to evolve as training progresses. Therefore, we use an adaptive " +"approach [andrew]_ that continuously adjusts the clipping threshold to " +"track a prespecified quantile of the update norm distribution." 
+msgstr "" + +#: ../../source/explanation-differential-privacy.rst:23 +msgid "Simplifying Assumptions" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:25 +msgid "" +"We make (and attempt to enforce) a number of assumptions that must be " +"satisfied to ensure that the training process actually realises the " +":math:`(\\epsilon, \\delta)` guarantees the user has in mind when " +"configuring the setup." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:27 +msgid "" +"**Fixed-size subsampling** : Fixed-size subsamples of the clients must be " +"taken at each round, as opposed to variable-sized Poisson subsamples." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:28 +msgid "" +"**Unweighted averaging** : The contributions from all the clients must be " +"weighted equally in the aggregate to eliminate the requirement for the " +"server to know in advance the sum of the weights of all clients available" +" for selection." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:29 +msgid "" +"**No client failures** : The set of available clients must stay constant " +"across all rounds of training. In other words, clients cannot drop out or" +" fail." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:31 +msgid "" +"The first two are useful for eliminating a multitude of complications " +"associated with calibrating the noise to the clipping threshold while the" +" third one is required to comply with the assumptions of the privacy " +"analysis." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:34 +msgid "" +"These restrictions are in line with constraints imposed by Andrew et al. " +"[andrew]_." 
+msgstr "" + +#: ../../source/explanation-differential-privacy.rst:37 +msgid "Customizable Responsibility for Noise injection" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:38 +msgid "" +"In contrast to other implementations where the addition of noise is " +"performed at the server, you can configure the site of noise injection to" +" better match your threat model. We provide users with the flexibility to" +" set up the training such that each client independently adds a small " +"amount of noise to the clipped update, with the result that simply " +"aggregating the noisy updates is equivalent to the explicit addition of " +"noise to the non-noisy aggregate at the server." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:41 +msgid "" +"To be precise, if we let :math:`m` be the number of clients sampled each " +"round and :math:`\\sigma_\\Delta` be the scale of the total Gaussian " +"noise that needs to be added to the sum of the model updates, we can use " +"simple maths to show that this is equivalent to each client adding noise " +"with scale :math:`\\sigma_\\Delta/\\sqrt{m}`." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:44 +msgid "Wrapper-based approach" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:46 +msgid "" +"Introducing DP to an existing workload can be thought of as adding an " +"extra layer of security around it. This inspired us to provide the " +"additional server and client-side logic needed to make the training " +"process differentially private as wrappers for instances of the " +":code:`Strategy` and :code:`NumPyClient` abstract classes respectively. " +"This wrapper-based approach has the advantage of being easily composable " +"with other wrappers that someone might contribute to the Flower library " +"in the future, e.g., for secure aggregation. 
Using Inheritance instead " +"can be tedious because that would require the creation of new sub- " +"classes every time a new class implementing :code:`Strategy` or " +":code:`NumPyClient` is defined." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:49 +msgid "Server-side logic" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:51 +msgid "" +"The first version of our solution was to define a decorator whose " +"constructor accepted, among other things, a boolean valued variable " +"indicating whether adaptive clipping was to be enabled or not. We quickly" +" realized that this would clutter its :code:`__init__()` function with " +"variables corresponding to hyperparameters of adaptive clipping that " +"would remain unused when it was disabled. A cleaner implementation could " +"be achieved by splitting the functionality into two decorators, " +":code:`DPFedAvgFixed` and :code:`DPFedAvgAdaptive`, with the latter sub- " +"classing the former. The constructors for both classes accept a boolean " +"parameter :code:`server_side_noising`, which, as the name suggests, " +"determines where noising is to be performed." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:54 +msgid "DPFedAvgFixed" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:56 +msgid "" +"The server-side capabilities required for the original version of DP-" +"FedAvg, i.e., the one which performed fixed clipping, can be completely " +"captured with the help of wrapper logic for just the following two " +"methods of the :code:`Strategy` abstract class." 
+msgstr "" + +#: ../../source/explanation-differential-privacy.rst:58 +msgid "" +":code:`configure_fit()` : The config dictionary being sent by the wrapped" +" :code:`Strategy` to each client needs to be augmented with an additional" +" value equal to the clipping threshold (keyed under " +":code:`dpfedavg_clip_norm`) and, if :code:`server_side_noising=true`, " +"another one equal to the scale of the Gaussian noise that needs to be " +"added at the client (keyed under :code:`dpfedavg_noise_stddev`). This " +"entails *post*-processing of the results returned by the wrappee's " +"implementation of :code:`configure_fit()`." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:59 +msgid "" +":code:`aggregate_fit()`: We check whether any of the sampled clients " +"dropped out or failed to upload an update before the round timed out. In " +"that case, we need to abort the current round, discarding any successful " +"updates that were received, and move on to the next one. On the other " +"hand, if all clients responded successfully, we must force the averaging " +"of the updates to happen in an unweighted manner by intercepting the " +":code:`parameters` field of :code:`FitRes` for each received update and " +"setting it to 1. Furthermore, if :code:`server_side_noising=true`, each " +"update is perturbed with an amount of noise equal to what it would have " +"been subjected to had client-side noising being enabled. This entails " +"*pre*-processing of the arguments to this method before passing them on " +"to the wrappee's implementation of :code:`aggregate_fit()`." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:62 +msgid "" +"We can't directly change the aggregation function of the wrapped strategy" +" to force it to add noise to the aggregate, hence we simulate client-side" +" noising to implement server-side noising." 
+msgstr "" + +#: ../../source/explanation-differential-privacy.rst:64 +msgid "" +"These changes have been put together into a class called " +":code:`DPFedAvgFixed`, whose constructor accepts the strategy being " +"decorated, the clipping threshold and the number of clients sampled every" +" round as compulsory arguments. The user is expected to specify the " +"clipping threshold since the order of magnitude of the update norms is " +"highly dependent on the model being trained and providing a default value" +" would be misleading. The number of clients sampled at every round is " +"required to calculate the amount of noise that must be added to each " +"individual update, either by the server or the clients." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:67 +msgid "DPFedAvgAdaptive" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:69 +msgid "" +"The additional functionality required to facilitate adaptive clipping has" +" been provided in :code:`DPFedAvgAdaptive`, a subclass of " +":code:`DPFedAvgFixed`. It overrides the above-mentioned methods to do the" +" following." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:71 +msgid "" +":code:`configure_fit()` : It intercepts the config dict returned by " +":code:`super.configure_fit()` to add the key-value pair " +":code:`dpfedavg_adaptive_clip_enabled:True` to it, which the client " +"interprets as an instruction to include an indicator bit (1 if update " +"norm <= clipping threshold, 0 otherwise) in the results returned by it." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:73 +msgid "" +":code:`aggregate_fit()` : It follows a call to " +":code:`super.aggregate_fit()` with one to :code:`__update_clip_norm__()`," +" a procedure which adjusts the clipping threshold on the basis of the " +"indicator bits received from the sampled clients." 
+msgstr "" + +#: ../../source/explanation-differential-privacy.rst:77 +msgid "Client-side logic" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:79 +msgid "" +"The client-side capabilities required can be completely captured through " +"wrapper logic for just the :code:`fit()` method of the " +":code:`NumPyClient` abstract class. To be precise, we need to *post-" +"process* the update computed by the wrapped client to clip it, if " +"necessary, to the threshold value supplied by the server as part of the " +"config dictionary. In addition to this, it may need to perform some extra" +" work if either (or both) of the following keys are also present in the " +"dict." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:81 +msgid "" +":code:`dpfedavg_noise_stddev` : Generate and add the specified amount of " +"noise to the clipped update." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:82 +msgid "" +":code:`dpfedavg_adaptive_clip_enabled` : Augment the metrics dict in the " +":code:`FitRes` object being returned to the server with an indicator bit," +" calculated as described earlier." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:86 +msgid "Performing the :math:`(\\epsilon, \\delta)` analysis" +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:88 +msgid "" +"Assume you have trained for :math:`n` rounds with sampling fraction " +":math:`q` and noise multiplier :math:`z`. In order to calculate the " +":math:`\\epsilon` value this would result in for a particular " +":math:`\\delta`, the following script may be used." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:98 +msgid "" +"McMahan, H. Brendan, et al. \"Learning differentially private recurrent " +"language models.\" arXiv preprint arXiv:1710.06963 (2017)." +msgstr "" + +#: ../../source/explanation-differential-privacy.rst:100 +msgid "" +"Andrew, Galen, et al. 
\"Differentially private learning with adaptive " +"clipping.\" Advances in Neural Information Processing Systems 34 (2021): " +"17455-17466." +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:2 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:292 +msgid "Federated evaluation" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:4 +msgid "" +"There are two main approaches to evaluating models in federated learning " +"systems: centralized (or server-side) evaluation and federated (or " +"client-side) evaluation." +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:8 +msgid "Centralized Evaluation" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:11 +msgid "Built-In Strategies" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:13 +msgid "" +"All built-in strategies support centralized evaluation by providing an " +"evaluation function during initialization. An evaluation function is any " +"function that can take the current global model parameters as input and " +"return evaluation results:" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:58 +msgid "Custom Strategies" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:60 +msgid "" +"The :code:`Strategy` abstraction provides a method called " +":code:`evaluate` that can directly be used to evaluate the current global" +" model parameters. The current server implementation calls " +":code:`evaluate` after parameter aggregation and before federated " +"evaluation (see next paragraph)." 
+msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:65 +msgid "Federated Evaluation" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:68 +msgid "Implementing Federated Evaluation" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:70 +msgid "" +"Client-side evaluation happens in the :code:`Client.evaluate` method and " +"can be configured from the server side." +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:101 +msgid "Configuring Federated Evaluation" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:103 +msgid "" +"Federated evaluation can be configured from the server side. Built-in " +"strategies support the following arguments:" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:105 +msgid "" +":code:`fraction_evaluate`: a :code:`float` defining the fraction of " +"clients that will be selected for evaluation. If " +":code:`fraction_evaluate` is set to :code:`0.1` and :code:`100` clients " +"are connected to the server, then :code:`10` will be randomly selected " +"for evaluation. If :code:`fraction_evaluate` is set to :code:`0.0`, " +"federated evaluation will be disabled." +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:106 +msgid "" +":code:`min_evaluate_clients`: an :code:`int`: the minimum number of " +"clients to be selected for evaluation. If :code:`fraction_evaluate` is " +"set to :code:`0.1`, :code:`min_evaluate_clients` is set to 20, and " +":code:`100` clients are connected to the server, then :code:`20` clients " +"will be selected for evaluation." +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:107 +msgid "" +":code:`min_available_clients`: an :code:`int` that defines the minimum " +"number of clients which need to be connected to the server before a round" +" of federated evaluation can start. 
If fewer than " +":code:`min_available_clients` are connected to the server, the server " +"will wait until more clients are connected before it continues to sample " +"clients for evaluation." +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:108 +msgid "" +":code:`on_evaluate_config_fn`: a function that returns a configuration " +"dictionary which will be sent to the selected clients. The function will " +"be called during each round and provides a convenient way to customize " +"client-side evaluation from the server side, for example, to configure " +"the number of validation steps performed." +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:135 +msgid "Evaluating Local Model Updates During Training" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:137 +msgid "" +"Model parameters can also be evaluated during training. " +":code:`Client.fit` can return arbitrary evaluation results as a " +"dictionary:" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:177 +msgid "Full Code Example" +msgstr "" + +#: ../../source/explanation-federated-evaluation.rst:179 +msgid "" +"For a full code example that uses both centralized and federated " +"evaluation, see the *Advanced TensorFlow Example* (the same approach can " +"be applied to workloads implemented in any other framework): " +"https://github.com/adap/flower/tree/main/examples/advanced-tensorflow" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:10 +msgid "FED Template" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:12 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:12 +msgid "Table of Contents" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:14 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:14 +msgid "[Table of Contents](#table-of-contents)" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:15 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:15 
+msgid "[Summary](#summary)" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:16 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:16 +msgid "[Motivation](#motivation)" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:17 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:17 +msgid "[Goals](#goals)" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:18 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:18 +msgid "[Non-Goals](#non-goals)" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:19 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:19 +msgid "[Proposal](#proposal)" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:20 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:23 +msgid "[Drawbacks](#drawbacks)" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:21 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:24 +msgid "[Alternatives Considered](#alternatives-considered)" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:22 +msgid "[Appendix](#appendix)" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:24 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:28 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:76 +msgid "Summary" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:26 +msgid "\\[TODO - sentence 1: summary of the problem\\]" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:28 +msgid "\\[TODO - sentence 2: summary of the solution\\]" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:30 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:47 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:77 +msgid "Motivation" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:32 +#: ../../source/fed/0000-20200102-fed-template.md:36 +#: ../../source/fed/0000-20200102-fed-template.md:40 +#: 
../../source/fed/0000-20200102-fed-template.md:44 +#: ../../source/fed/0000-20200102-fed-template.md:48 +#: ../../source/fed/0000-20200102-fed-template.md:54 +#: ../../source/fed/0000-20200102-fed-template.md:58 +msgid "\\[TODO\\]" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:34 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:53 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:78 +msgid "Goals" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:38 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:59 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:79 +msgid "Non-Goals" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:42 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:65 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:80 +msgid "Proposal" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:46 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:85 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:129 +msgid "Drawbacks" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:50 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:86 +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:135 +msgid "Alternatives Considered" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:52 +msgid "\\[Alternative 1\\]" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:56 +msgid "\\[Alternative 2\\]" +msgstr "" + +#: ../../source/fed/0000-20200102-fed-template.md:60 +msgid "Appendix" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:10 +msgid "Flower Enhancement Doc" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:20 +msgid "[Enhancement Doc Template](#enhancement-doc-template)" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:21 +msgid "[Metadata](#metadata)" +msgstr "" + +#: 
../../source/fed/0001-20220311-flower-enhancement-doc.md:22 +msgid "[Workflow](#workflow)" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:25 +msgid "[GitHub Issues](#github-issues)" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:26 +msgid "[Google Docs](#google-docs)" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:30 +msgid "A Flower Enhancement is a standardized development process to" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:32 +msgid "provide a common structure for proposing larger changes" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:33 +msgid "ensure that the motivation for a change is clear" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:34 +msgid "persist project information in a version control system" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:35 +msgid "document the motivation for impactful user-facing changes" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:36 +msgid "reserve GitHub issues for tracking work in flight" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:37 +msgid "" +"ensure community participants can successfully drive changes to " +"completion across one or more releases while stakeholders are adequately " +"represented throughout the process" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:39 +msgid "Hence, an Enhancement Doc combines aspects of" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:41 +msgid "a feature, and effort-tracking document" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:42 +msgid "a product requirements document" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:43 +msgid "a design document" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:45 
+msgid ""
+"into one file, which is created incrementally in collaboration with the "
+"community."
+msgstr ""
+
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:49
+msgid ""
+"For far-reaching changes or features proposed to Flower, an abstraction "
+"beyond a single GitHub issue or pull request is required to understand "
+"and communicate upcoming changes to the project."
+msgstr ""
+
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:51
+msgid ""
+"The purpose of this process is to reduce the amount of \"tribal "
+"knowledge\" in our community. By moving decisions from Slack threads, "
+"video calls, and hallway conversations into a well-tracked artifact, this"
+" process aims to enhance communication and discoverability."
+msgstr ""
+
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:55
+msgid ""
+"Roughly any larger, user-facing enhancement should follow the Enhancement"
+" process. If an enhancement would be described in either written or "
+"verbal communication to anyone besides the author or developer, then "
+"consider creating an Enhancement Doc."
+msgstr ""
+
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:57
+msgid ""
+"Similarly, any technical effort (refactoring, major architectural change)"
+" that will impact a large section of the development community should "
+"also be communicated widely. The Enhancement process is suited for this "
+"even if it will have zero impact on the typical user or operator."
+msgstr ""
+
+#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:61
+msgid ""
+"For small changes and additions, going through the Enhancement process "
+"would be time-consuming and unnecessary. This includes, for example, "
+"adding new Federated Learning algorithms, as these only add features "
+"without changing how Flower works or is used."
+msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:63 +msgid "" +"Enhancements are different from feature requests, as they are already " +"providing a laid-out path for implementation and are championed by " +"members of the community." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:67 +msgid "" +"An Enhancement is captured in a Markdown file that follows a defined " +"template and a workflow to review and store enhancement docs for " +"reference — the Enhancement Doc." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:69 +msgid "Enhancement Doc Template" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:71 +msgid "" +"Each enhancement doc is provided as a Markdown file having the following " +"structure" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:73 +msgid "Metadata (as [described below](#metadata) in form of a YAML preamble)" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:74 +msgid "Title (same as in metadata)" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:75 +msgid "Table of Contents (if needed)" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:81 +msgid "Notes/Constraints/Caveats (optional)" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:82 +msgid "Design Details (optional)" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:83 +msgid "Graduation Criteria" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:84 +msgid "Upgrade/Downgrade Strategy (if applicable)" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:88 +msgid "As a reference, this document follows the above structure." 
+msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:90 +msgid "Metadata" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:92 +msgid "" +"**fed-number** (Required) The `fed-number` of the last Flower Enhancement" +" Doc + 1. With this number, it becomes easy to reference other proposals." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:94 +msgid "**title** (Required) The title of the proposal in plain language." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:96 +msgid "" +"**status** (Required) The current status of the proposal. See " +"[workflow](#workflow) for the possible states." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:98 +msgid "" +"**authors** (Required) A list of authors of the proposal. This is simply " +"the GitHub ID." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:100 +msgid "" +"**creation-date** (Required) The date that the proposal was first " +"submitted in a PR." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:102 +msgid "" +"**last-updated** (Optional) The date that the proposal was last changed " +"significantly." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:104 +msgid "" +"**see-also** (Optional) A list of other proposals that are relevant to " +"this one." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:106 +msgid "**replaces** (Optional) A list of proposals that this one replaces." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:108 +msgid "**superseded-by** (Optional) A list of proposals that this one supersedes." 
+msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:111 +msgid "Workflow" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:113 +msgid "" +"The idea forming the enhancement should already have been discussed or " +"pitched in the community. As such, it needs a champion, usually the " +"author, who shepherds the enhancement. This person also has to find " +"committers to Flower willing to review the proposal." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:115 +msgid "" +"New enhancements are checked in with a file name in the form of `NNNN-" +"YYYYMMDD-enhancement-title.md`, with `NNNN` being the Flower Enhancement " +"Doc number, to `enhancements`. All enhancements start in `provisional` " +"state as part of a pull request. Discussions are done as part of the pull" +" request review." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:117 +msgid "" +"Once an enhancement has been reviewed and approved, its status is changed" +" to `implementable`. The actual implementation is then done in separate " +"pull requests. These pull requests should mention the respective " +"enhancement as part of their description. After the implementation is " +"done, the proposal status is changed to `implemented`." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:119 +msgid "" +"Under certain conditions, other states are possible. An Enhancement has " +"the following states:" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:121 +msgid "" +"`provisional`: The enhancement has been proposed and is actively being " +"defined. This is the starting state while the proposal is being fleshed " +"out and actively defined and discussed." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:122 +msgid "`implementable`: The enhancement has been reviewed and approved." 
+msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:123 +msgid "" +"`implemented`: The enhancement has been implemented and is no longer " +"actively changed." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:124 +msgid "`deferred`: The enhancement is proposed but not actively being worked on." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:125 +msgid "" +"`rejected`: The authors and reviewers have decided that this enhancement " +"is not moving forward." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:126 +msgid "`withdrawn`: The authors have withdrawn the enhancement." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:127 +msgid "`replaced`: The enhancement has been replaced by a new enhancement." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:131 +msgid "" +"Adding an additional process to the ones already provided by GitHub " +"(Issues and Pull Requests) adds more complexity and can be a barrier for " +"potential first-time contributors." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:133 +msgid "" +"Expanding the proposal template beyond the single-sentence description " +"currently required in the features issue template may be a heavy burden " +"for non-native English speakers." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:137 +msgid "GitHub Issues" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:139 +msgid "" +"Using GitHub Issues for these kinds of enhancements is doable. One could " +"use, for example, tags, to differentiate and filter them from other " +"issues. The main issue is in discussing and reviewing an enhancement: " +"GitHub issues only have a single thread for comments. Enhancements " +"usually have multiple threads of discussion at the same time for various " +"parts of the doc. 
Managing these multiple discussions can be confusing " +"when using GitHub Issues." +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:141 +msgid "Google Docs" +msgstr "" + +#: ../../source/fed/0001-20220311-flower-enhancement-doc.md:143 +msgid "" +"Google Docs allow for multiple threads of discussions. But as Google Docs" +" are hosted outside the project, their discoverability by the community " +"needs to be taken care of. A list of links to all proposals has to be " +"managed and made available for the community. Compared to shipping " +"proposals as part of Flower's repository, the potential for missing links" +" is much higher." +msgstr "" + +#: ../../source/fed/index.md:1 +msgid "FED - Flower Enhancement Doc" +msgstr "" + +#: ../../source/how-to-aggregate-evaluation-results.rst:2 +msgid "Aggregate evaluation results" +msgstr "" + +#: ../../source/how-to-aggregate-evaluation-results.rst:4 +msgid "" +"The Flower server does not prescribe a way to aggregate evaluation " +"results, but it enables the user to fully customize result aggregation." +msgstr "" + +#: ../../source/how-to-aggregate-evaluation-results.rst:8 +msgid "Aggregate Custom Evaluation Results" +msgstr "" + +#: ../../source/how-to-aggregate-evaluation-results.rst:10 +msgid "" +"The same :code:`Strategy`-customization approach can be used to aggregate" +" custom evaluation results coming from individual clients. Clients can " +"return custom metrics to the server by returning a dictionary:" +msgstr "" + +#: ../../source/how-to-aggregate-evaluation-results.rst:36 +msgid "" +"The server can then use a customized strategy to aggregate the metrics " +"provided in these dictionaries:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:2 +msgid "Configure clients" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:4 +msgid "" +"Along with model parameters, Flower can send configuration values to " +"clients. Configuration values can be used for various purposes. 
They are," +" for example, a popular way to control client-side hyperparameters from " +"the server." +msgstr "" + +#: ../../source/how-to-configure-clients.rst:7 +msgid "Configuration values" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:9 +msgid "" +"Configuration values are represented as a dictionary with ``str`` keys " +"and values of type ``bool``, ``bytes``, ``double`` (64-bit precision " +"float), ``int``, or ``str`` (or equivalent types in different languages)." +" Here is an example of a configuration dictionary in Python:" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:20 +msgid "" +"Flower serializes these configuration dictionaries (or *config dict* for " +"short) to their ProtoBuf representation, transports them to the client " +"using gRPC, and then deserializes them back to Python dictionaries." +msgstr "" + +#: ../../source/how-to-configure-clients.rst:24 +msgid "" +"Currently, there is no support for directly sending collection types " +"(e.g., ``Set``, ``List``, ``Map``) as values in configuration " +"dictionaries. There are several workarounds to send collections as values" +" by converting them to one of the supported value types (and converting " +"them back on the client-side)." +msgstr "" + +#: ../../source/how-to-configure-clients.rst:26 +msgid "" +"One can, for example, convert a list of floating-point numbers to a JSON " +"string, then send the JSON string using the configuration dictionary, and" +" then convert the JSON string back to a list of floating-point numbers on" +" the client." +msgstr "" + +#: ../../source/how-to-configure-clients.rst:30 +msgid "Configuration through built-in strategies" +msgstr "" + +#: ../../source/how-to-configure-clients.rst:32 +msgid "" +"The easiest way to send configuration values to clients is to use a " +"built-in strategy like :code:`FedAvg`. Built-in strategies support so-" +"called configuration functions. 
A configuration function is a function "
+"that the built-in strategy calls to get the configuration dictionary for "
+"the current round. It then forwards the configuration dictionary to all "
+"the clients selected during that round."
+msgstr ""
+
+#: ../../source/how-to-configure-clients.rst:34
+msgid ""
+"Let's start with a simple example. Imagine we want to send (a) the batch "
+"size that the client should use, (b) the current global round of "
+"federated learning, and (c) the number of epochs to train on the client-"
+"side. Our configuration function could look like this:"
+msgstr ""
+
+#: ../../source/how-to-configure-clients.rst:47
+msgid ""
+"To make the built-in strategies use this function, we can pass it to "
+"``FedAvg`` during initialization using the parameter "
+":code:`on_fit_config_fn`:"
+msgstr ""
+
+#: ../../source/how-to-configure-clients.rst:56
+msgid "On the client side, we receive the configuration dictionary in ``fit``:"
+msgstr ""
+
+#: ../../source/how-to-configure-clients.rst:67
+msgid ""
+"There is also an `on_evaluate_config_fn` to configure evaluation, which "
+"works the same way. They are separate functions because one might want to"
+" send different configuration values to `evaluate` (for example, to use a"
+" different batch size)."
+msgstr ""
+
+#: ../../source/how-to-configure-clients.rst:69
+msgid ""
+"The built-in strategies call this function every round (that is, every "
+"time `Strategy.configure_fit` or `Strategy.configure_evaluate` runs). "
+"Calling `on_evaluate_config_fn` every round allows us to vary/change the "
+"config dict over consecutive rounds. If we wanted to implement a "
+"hyperparameter schedule, for example, to increase the number of local "
+"epochs during later rounds, we could do the following:"
+msgstr ""
+
+#: ../../source/how-to-configure-clients.rst:82
+msgid "The :code:`FedAvg` strategy will call this function *every round*."
+msgstr ""
+
+#: ../../source/how-to-configure-clients.rst:85
+msgid "Configuring individual clients"
+msgstr ""
+
+#: ../../source/how-to-configure-clients.rst:87
+msgid ""
+"In some cases, it is necessary to send different configuration values to "
+"different clients."
+msgstr ""
+
+#: ../../source/how-to-configure-clients.rst:89
+msgid ""
+"This can be achieved by customizing an existing strategy or by "
+"`implementing a custom strategy from scratch "
+"`_. "
+"Here's a nonsensical example that customizes :code:`FedAvg` by adding a "
+"custom ``\"hello\": \"world\"`` configuration key/value pair to the "
+"config dict of a *single client* (only the first client in the list, the "
+"other clients in this round do not receive this \"special\" config "
+"value):"
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:2
+msgid "Configure logging"
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:4
+msgid ""
+"The Flower logger keeps track of all core events that take place in "
+"federated learning workloads. It presents information by default "
+"following a standard message format:"
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:13
+msgid ""
+"containing relevant information including: log message level (e.g. "
+":code:`INFO`, :code:`DEBUG`), a timestamp, the line where the logging "
+"took place from, as well as the log message itself. In this way, the "
+"logger would typically display information on your terminal as follows:"
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:34
+msgid "Saving log to file"
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:36
+msgid ""
+"By default, the Flower log is outputted to the terminal where you launch "
+"your Federated Learning workload from. This applies for both gRPC-based "
+"federation (i.e. when you do :code:`fl.server.start_server`) and when "
+"using the :code:`VirtualClientEngine` (i.e. when you do "
+":code:`fl.simulation.start_simulation`). 
In some situations you might "
+"want to save this log to disk. You can do so by calling the "
+"`fl.common.logger.configure() "
+"`_"
+" function. For example:"
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:53
+msgid ""
+"With the above, Flower will record the log you see on your terminal to "
+":code:`log.txt`. This file will be created in the same directory as where"
+" you are running the code from. If we inspect we see the log above is "
+"also recorded but prefixed with :code:`identifier` on each line:"
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:74
+msgid "Log your own messages"
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:76
+msgid ""
+"You might expand the information shown by default with the Flower logger "
+"by adding more messages relevant to your application. You can achieve "
+"this easily as follows."
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:102
+msgid ""
+"In this way your logger will show, in addition to the default messages, "
+"the ones introduced by the clients as specified above."
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:128
+msgid "Log to a remote service"
+msgstr ""
+
+#: ../../source/how-to-configure-logging.rst:130
+msgid ""
+"The :code:`fl.common.logger.configure` function also allows specifying a"
+" host to which logs can be pushed (via :code:`POST`) through a native "
+"Python :code:`logging.handler.HTTPHandler`. This is a particularly useful"
+" feature in :code:`gRPC`-based Federated Learning workloads where "
+"otherwise gathering logs from all entities (i.e. the server and the "
+"clients) might be cumbersome. Note that in Flower simulation, the server "
+"automatically displays all logs. You can still specify a "
+":code:`HTTPHandler` should you wish to backup or analyze the logs "
+"somewhere else."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:2
+msgid "Enable SSL connections"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:4
+msgid ""
+"This guide describes how an SSL-enabled secure Flower server can be "
+"started and how a Flower client can establish a secure connection to it."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:7
+msgid ""
+"A complete code example demonstrating a secure connection can be found "
+"`here `_."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:10
+msgid ""
+"The code example comes with a README.md file which will explain how to "
+"start it. Although it is already SSL-enabled, it might be less "
+"descriptive on how. Stick to this guide for a deeper introduction to the "
+"topic."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:16
+msgid "Certificates"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:18
+msgid ""
+"Using SSL-enabled connections requires certificates to be passed to the "
+"server and client. For the purpose of this guide we are going to generate"
+" self-signed certificates. As this can become quite complex we are going "
+"to ask you to run the script in :code:`examples/advanced-"
+"tensorflow/certificates/generate.sh`"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:23
+msgid "with the following command sequence:"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:30
+msgid ""
+"This will generate the certificates in :code:`examples/advanced-"
+"tensorflow/.cache/certificates`."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:32
+msgid ""
+"The approach how the SSL certificates are generated in this example can "
+"serve as an inspiration and starting point but should not be taken as "
+"complete for production environments. Please refer to other sources "
+"regarding the issue of correctly generating certificates for production "
+"environments."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:36
+msgid ""
+"In case you are a researcher you might be just fine using the self-signed"
+" certificates generated using the scripts which are part of this guide."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:41
+msgid "Server"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:43
+msgid ""
+"We are now going to show how to write a server which uses the previously "
+"generated scripts."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:61
+msgid ""
+"When providing certificates, the server expects a tuple of three "
+"certificates. :code:`Path` can be used to easily read the contents of "
+"those files into byte strings, which is the data type "
+":code:`start_server` expects."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:65
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:37
+#: ../../source/ref-api-flwr.rst:15
+msgid "Client"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:67
+msgid ""
+"We are now going to show how to write a client which uses the previously "
+"generated scripts:"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:84
+msgid ""
+"When setting :code:`root_certificates`, the client expects the PEM-"
+"encoded root certificates as a byte string. We are again using "
+":code:`Path` to simplify reading those as byte strings."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:89
+#: ../../source/tutorial-series-what-is-federated-learning.ipynb:287
+msgid "Conclusion"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:91
+msgid ""
+"You should now have learned how to generate self-signed certificates "
+"using the given script, start an SSL-enabled server, and have a client "
+"establish a secure connection to it."
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:96
+msgid "Additional resources"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:98
+msgid ""
+"These additional sources might be relevant if you would like to dive "
+"deeper into the topic of certificates:"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:100
+msgid "`Let's Encrypt `_"
+msgstr ""
+
+#: ../../source/how-to-enable-ssl-connections.rst:101
+msgid "`certbot `_"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:2
+msgid "Implement strategies"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:4
+msgid ""
+"The strategy abstraction enables implementation of fully custom "
+"strategies. A strategy is basically the federated learning algorithm that"
+" runs on the server. Strategies decide how to sample clients, how to "
+"configure clients for training, how to aggregate updates, and how to "
+"evaluate models. Flower provides a few built-in strategies which are "
+"based on the same API described below."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:11
+msgid "The :code:`Strategy` abstraction"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:13
+msgid ""
+"All strategy implementations are derived from the abstract base class "
+":code:`flwr.server.strategy.Strategy`, both built-in implementations and "
+"third party implementations. This means that custom strategy "
+"implementations have the exact same capabilities at their disposal as "
+"built-in ones."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:18
+msgid ""
+"The strategy abstraction defines a few abstract methods that need to be "
+"implemented:"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:75
+msgid ""
+"Creating a new strategy means implementing a new :code:`class` (derived "
+"from the abstract base class :code:`Strategy`) that implements the "
+"previously shown abstract methods:"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:100
+msgid "The Flower server calls these methods in the following order:"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:177
+msgid "The following sections describe each of those methods in more detail."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:180
+msgid "The :code:`initialize_parameters` method"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:182
+msgid ""
+":code:`initialize_parameters` is called only once, at the very beginning "
+"of an execution. It is responsible for providing the initial global model"
+" parameters in a serialized form (i.e., as a :code:`Parameters` object)."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:184
+msgid ""
+"Built-in strategies return user-provided initial parameters. The "
+"following example shows how initial parameters can be passed to "
+":code:`FedAvg`:"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:209
+msgid ""
+"The Flower server will call :code:`initialize_parameters`, which either "
+"returns the parameters that were passed to :code:`initial_parameters`, or"
+" :code:`None`. If no parameters are returned from "
+":code:`initialize_parameters` (i.e., :code:`None`), the server will "
+"randomly select one client and ask it to provide its parameters. This is "
+"a convenience feature and not recommended in practice, but it can be "
+"useful for prototyping. In practice, it is recommended to always use "
+"server-side parameter initialization."
+msgstr "" + +#: ../../source/how-to-implement-strategies.rst:213 +msgid "" +"Server-side parameter initialization is a powerful mechanism. It can be " +"used, for example, to resume training from a previously saved checkpoint." +" It is also the fundamental capability needed to implement hybrid " +"approaches, for example, to fine-tune a pre-trained model using federated" +" learning." +msgstr "" + +#: ../../source/how-to-implement-strategies.rst:216 +msgid "The :code:`configure_fit` method" +msgstr "" + +#: ../../source/how-to-implement-strategies.rst:218 +msgid "" +":code:`configure_fit` is responsible for configuring the upcoming round " +"of training. What does *configure* mean in this context? Configuring a " +"round means selecting clients and deciding what instructions to send to " +"these clients. The signature of :code:`configure_fit` makes this clear:" +msgstr "" + +#: ../../source/how-to-implement-strategies.rst:231 +msgid "" +"The return value is a list of tuples, each representing the instructions " +"that will be sent to a particular client. Strategy implementations " +"usually perform the following steps in :code:`configure_fit`:" +msgstr "" + +#: ../../source/how-to-implement-strategies.rst:233 +#: ../../source/how-to-implement-strategies.rst:280 +msgid "" +"Use the :code:`client_manager` to randomly sample all (or a subset of) " +"available clients (each represented as a :code:`ClientProxy` object)" +msgstr "" + +#: ../../source/how-to-implement-strategies.rst:234 +msgid "" +"Pair each :code:`ClientProxy` with the same :code:`FitIns` holding the " +"current global model :code:`parameters` and :code:`config` dict" +msgstr "" + +#: ../../source/how-to-implement-strategies.rst:236 +msgid "" +"More sophisticated implementations can use :code:`configure_fit` to " +"implement custom client selection logic. 
A client will only participate "
+"in a round if the corresponding :code:`ClientProxy` is included in the "
+"list returned from :code:`configure_fit`."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:240
+msgid ""
+"The structure of this return value provides a lot of flexibility to the "
+"user. Since instructions are defined on a per-client basis, different "
+"instructions can be sent to each client. This enables custom strategies "
+"to train, for example, different models on different clients, or use "
+"different hyperparameters on different clients (via the :code:`config` "
+"dict)."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:243
+msgid "The :code:`aggregate_fit` method"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:245
+msgid ""
+":code:`aggregate_fit` is responsible for aggregating the results returned"
+" by the clients that were selected and asked to train in "
+":code:`configure_fit`."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:258
+msgid ""
+"Of course, failures can happen, so there is no guarantee that the server "
+"will get results from all the clients it sent instructions to (via "
+":code:`configure_fit`). :code:`aggregate_fit` therefore receives a list "
+"of :code:`results`, but also a list of :code:`failures`."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:260
+msgid ""
+":code:`aggregate_fit` returns an optional :code:`Parameters` object and a"
+" dictionary of aggregated metrics. The :code:`Parameters` return value is"
+" optional because :code:`aggregate_fit` might decide that the results "
+"provided are not sufficient for aggregation (e.g., too many failures)."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:263
+msgid "The :code:`configure_evaluate` method"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:265
+msgid ""
+":code:`configure_evaluate` is responsible for configuring the upcoming "
+"round of evaluation. 
What does *configure* mean in this context? "
+"Configuring a round means selecting clients and deciding what "
+"instructions to send to these clients. The signature of "
+":code:`configure_evaluate` makes this clear:"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:278
+msgid ""
+"The return value is a list of tuples, each representing the instructions "
+"that will be sent to a particular client. Strategy implementations "
+"usually perform the following steps in :code:`configure_evaluate`:"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:281
+msgid ""
+"Pair each :code:`ClientProxy` with the same :code:`EvaluateIns` holding "
+"the current global model :code:`parameters` and :code:`config` dict"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:283
+msgid ""
+"More sophisticated implementations can use :code:`configure_evaluate` to "
+"implement custom client selection logic. A client will only participate "
+"in a round if the corresponding :code:`ClientProxy` is included in the "
+"list returned from :code:`configure_evaluate`."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:287
+msgid ""
+"The structure of this return value provides a lot of flexibility to the "
+"user. Since instructions are defined on a per-client basis, different "
+"instructions can be sent to each client. This enables custom strategies "
+"to evaluate, for example, different models on different clients, or use "
+"different hyperparameters on different clients (via the :code:`config` "
+"dict)."
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:291
+msgid "The :code:`aggregate_evaluate` method"
+msgstr ""
+
+#: ../../source/how-to-implement-strategies.rst:293
+msgid ""
+":code:`aggregate_evaluate` is responsible for aggregating the results "
+"returned by the clients that were selected and asked to evaluate in "
+":code:`configure_evaluate`."
+msgstr "" + +#: ../../source/how-to-implement-strategies.rst:306 +msgid "" +"Of course, failures can happen, so there is no guarantee that the server " +"will get results from all the clients it sent instructions to (via " +":code:`configure_evaluate`). :code:`aggregate_evaluate` therefore " +"receives a list of :code:`results`, but also a list of :code:`failures`." +msgstr "" + +#: ../../source/how-to-implement-strategies.rst:308 +msgid "" +":code:`aggregate_evaluate` returns an optional :code:`float` (loss) and a" +" dictionary of aggregated metrics. The :code:`float` return value is " +"optional because :code:`aggregate_evaluate` might decide that the results" +" provided are not sufficient for aggregation (e.g., too many failures)." +msgstr "" + +#: ../../source/how-to-implement-strategies.rst:311 +msgid "The :code:`evaluate` method" +msgstr "" + +#: ../../source/how-to-implement-strategies.rst:313 +msgid "" +":code:`evaluate` is responsible for evaluating model parameters on the " +"server-side. Having :code:`evaluate` in addition to " +":code:`configure_evaluate`/:code:`aggregate_evaluate` enables strategies " +"to perform both servers-side and client-side (federated) evaluation." +msgstr "" + +#: ../../source/how-to-implement-strategies.rst:323 +msgid "" +"The return value is again optional because the strategy might not need to" +" implement server-side evaluation or because the user-defined " +":code:`evaluate` method might not complete successfully (e.g., it might " +"fail to load the server-side evaluation data)." 
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:2
+msgid "Install Flower"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:6
+msgid "Python version"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:12
+msgid "Install stable release"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:14
+msgid ""
+"Stable releases are available on `PyPI "
+"`_::"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:18
+msgid ""
+"For simulations that use the Virtual Client Engine, ``flwr`` should be "
+"installed with the ``simulation`` extra::"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:24
+msgid "Verify installation"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:26
+msgid ""
+"The following command can be used to verify if Flower was successfully "
+"installed. If everything worked, it should print the version of Flower to"
+" the command line::"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:33
+msgid "Advanced installation options"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:36
+msgid "Install pre-release"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:38
+msgid ""
+"New (possibly unstable) versions of Flower are sometimes available as "
+"pre-release versions (alpha, beta, release candidate) before the stable "
+"release happens::"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:42
+msgid ""
+"For simulations that use the Virtual Client Engine, ``flwr`` pre-releases"
+" should be installed with the ``simulation`` extra::"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:47
+msgid "Install nightly release"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:49
+msgid ""
+"The latest (potentially unstable) changes in Flower are available as "
+"nightly releases::"
+msgstr ""
+
+#: ../../source/how-to-install-flower.rst:53
+msgid ""
+"For simulations that use the Virtual Client Engine, ``flwr-nightly`` "
+"should be installed with the ``simulation`` extra::"
+msgstr ""
+
+#: 
../../source/how-to-monitor-simulation.rst:2 +msgid "Monitor simulation" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:4 +msgid "" +"Flower allows you to monitor system resources while running your " +"simulation. Moreover, the Flower simulation engine is powerful and " +"enables you to decide how to allocate resources per client manner and " +"constrain the total usage. Insights from resource consumption can help " +"you make smarter decisions and speed up the execution time." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:6 +msgid "" +"The specific instructions assume you are using macOS and have the " +"`Homebrew `_ package manager installed." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:10 +msgid "Downloads" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:16 +msgid "" +"`Prometheus `_ is used for data collection, while" +" `Grafana `_ will enable you to visualize the " +"collected data. They are both well integrated with `Ray " +"`_ which Flower uses under the hood." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:18 +msgid "" +"Overwrite the configuration files (depending on your device, it might be " +"installed on a different path)." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:20 +msgid "If you are on an M1 Mac, it should be:" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:27 +msgid "On the previous generation Intel Mac devices, it should be:" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:34 +msgid "" +"Open the respective configuration files and change them. Depending on " +"your device, use one of the two following commands:" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:44 +msgid "" +"and then delete all the text in the file and paste a new Prometheus " +"config you see below. 
You may adjust the time intervals to your " +"requirements:" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:59 +msgid "" +"Now after you have edited the Prometheus configuration, do the same with " +"the Grafana configuration files. Open those using one of the following " +"commands as before:" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:69 +msgid "" +"Your terminal editor should open and allow you to apply the following " +"configuration as before." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:84 +msgid "" +"Congratulations, you just downloaded all the necessary software needed " +"for metrics tracking. Now, let’s start it." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:88 +msgid "Tracking metrics" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:90 +msgid "" +"Before running your Flower simulation, you have to start the monitoring " +"tools you have just installed and configured." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:97 +msgid "" +"Please include the following argument in your Python code when starting a" +" simulation." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:108 +msgid "Now, you are ready to start your workload." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:110 +msgid "" +"Shortly after the simulation starts, you should see the following logs in" +" your terminal:" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:117 +msgid "You can look at everything at ``_ ." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:119 +msgid "" +"It's a Ray Dashboard. You can navigate to Metrics (on the left panel, the" +" lowest option)." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:121 +msgid "" +"Or alternatively, you can just see them in Grafana by clicking on the " +"right-up corner, “View in Grafana”. Please note that the Ray dashboard is" +" only accessible during the simulation. 
After the simulation ends, you " +"can only use Grafana to explore the metrics. You can start Grafana by " +"going to ``http://localhost:3000/``." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:123 +msgid "" +"After you finish the visualization, stop Prometheus and Grafana. This is " +"important as they will otherwise block, for example port :code:`3000` on " +"your machine as long as they are running." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:132 +msgid "Resource allocation" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:134 +msgid "" +"You must understand how the Ray library works to efficiently allocate " +"system resources to simulation clients on your own." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:136 +msgid "" +"Initially, the simulation (which Ray handles under the hood) starts by " +"default with all the available resources on the system, which it shares " +"among the clients. It doesn't mean it divides it equally among all of " +"them, nor that the model training happens at all of them simultaneously. " +"You will learn more about that in the later part of this blog. You can " +"check the system resources by running the following:" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:143 +msgid "In Google Colab, the result you see might be similar to this:" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:155 +msgid "" +"However, you can overwrite the defaults. When starting a simulation, do " +"the following (you don't need to overwrite all of them):" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:175 +msgid "Let’s also specify the resource for a single client." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:205 +msgid "" +"Now comes the crucial part. Ray will start a new client only when it has " +"all the required resources (such that they run in parallel) when the " +"resources allow." 
+msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:207 +msgid "" +"In the example above, only one client will be run, so your clients won't " +"run concurrently. Setting :code:`client_num_gpus = 0.5` would allow " +"running two clients and therefore enable them to run concurrently. Be " +"careful not to require more resources than available. If you specified " +":code:`client_num_gpus = 2`, the simulation wouldn't start (even if you " +"had 2 GPUs but decided to set 1 in :code:`ray_init_args`)." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:212 ../../source/ref-faq.rst:2 +msgid "FAQ" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:214 +msgid "Q: I don't see any metrics logged." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:216 +msgid "" +"A: The timeframe might not be properly set. The setting is in the top " +"right corner (\"Last 30 minutes\" by default). Please change the " +"timeframe to reflect the period when the simulation was running." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:218 +msgid "" +"Q: I see “Grafana server not detected. Please make sure the Grafana " +"server is running and refresh this page” after going to the Metrics tab " +"in Ray Dashboard." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:220 +msgid "" +"A: You probably don't have Grafana running. Please check the running " +"services" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:226 +msgid "" +"Q: I see \"This site can't be reached\" when going to " +"``_." +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:228 +msgid "" +"A: Either the simulation has already finished, or you still need to start" +" Prometheus." 
+msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:232 +msgid "Resources" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:234 +msgid "" +"Ray Dashboard: ``_" +msgstr "" + +#: ../../source/how-to-monitor-simulation.rst:236 +msgid "" +"Ray Metrics: ``_" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:2 +msgid "Run simulations" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:8 +msgid "" +"Simulating Federated Learning workloads is useful for a multitude of use-" +"cases: you might want to run your workload on a large cohort of clients " +"but without having to source, configure and mange a large number of " +"physical devices; you might want to run your FL workloads as fast as " +"possible on the compute systems you have access to without having to go " +"through a complex setup process; you might want to validate your " +"algorithm on different scenarios at varying levels of data and system " +"heterogeneity, client availability, privacy budgets, etc. These are among" +" some of the use-cases where simulating FL workloads makes sense. Flower " +"can accommodate these scenarios by means of its `VirtualClientEngine " +"`_ or " +"VCE." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:10 +msgid "" +"The :code:`VirtualClientEngine` schedules, launches and manages `virtual`" +" clients. These clients are identical to `non-virtual` clients (i.e. the " +"ones you launch via the command `flwr.client.start_numpy_client `_) in the sense that they can be configure " +"by creating a class inheriting, for example, from " +"`flwr.client.NumPyClient `_ " +"and therefore behave in an identical way. In addition to that, clients " +"managed by the :code:`VirtualClientEngine` are:" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:12 +msgid "" +"resource-aware: this means that each client gets assigned a portion of " +"the compute and memory on your system. 
You as a user can control this at " +"the beginning of the simulation and allows you to control the degree of " +"parallelism of your Flower FL simulation. The fewer the resources per " +"client, the more clients can run concurrently on the same hardware." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:13 +msgid "" +"self-managed: this means that you as a user do not need to launch clients" +" manually, instead this gets delegated to :code:`VirtualClientEngine`'s " +"internals." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:14 +msgid "" +"ephemeral: this means that a client is only materialized when it is " +"required in the FL process (e.g. to do `fit() `_). The object is destroyed afterwards," +" releasing the resources it was assigned and allowing in this way other " +"clients to participate." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:16 +msgid "" +"The :code:`VirtualClientEngine` implements `virtual` clients using `Ray " +"`_, an open-source framework for scalable Python " +"workloads. In particular, Flower's :code:`VirtualClientEngine` makes use " +"of `Actors `_ to " +"spawn `virtual` clients and run their workload." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:20 +msgid "Launch your Flower simulation" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:22 +msgid "" +"Running Flower simulations still require you to define your client class," +" a strategy, and utility functions to download and load (and potentially " +"partition) your dataset. With that out of the way, launching your " +"simulation is done with `start_simulation `_ and a minimal example looks" +" as follows:" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:44 +msgid "VirtualClientEngine resources" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:45 +msgid "" +"By default the VCE has access to all system resources (i.e. all CPUs, all" +" GPUs, etc) since that is also the default behavior when starting Ray. 
" +"However, in some settings you might want to limit how many of your system" +" resources are used for simulation. You can do this via the " +":code:`ray_init_args` input argument to :code:`start_simulation` which " +"the VCE internally passes to Ray's :code:`ray.init` command. For a " +"complete list of settings you can configure check the `ray.init " +"`_" +" documentation. Do not set :code:`ray_init_args` if you want the VCE to " +"use all your system's CPUs and GPUs." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:62 +msgid "Assigning client resources" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:63 +msgid "" +"By default the :code:`VirtualClientEngine` assigns a single CPU core (and" +" nothing else) to each virtual client. This means that if your system has" +" 10 cores, that many virtual clients can be concurrently running." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:65 +msgid "" +"More often than not, you would probably like to adjust the resources your" +" clients get assigned based on the complexity (i.e. compute and memory " +"footprint) of your FL workload. You can do so when starting your " +"simulation by setting the argument `client_resources` to " +"`start_simulation `_." +" Two keys are internally used by Ray to schedule and spawn workloads (in " +"our case Flower clients):" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:67 +msgid ":code:`num_cpus` indicates the number of CPU cores a client would get." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:68 +msgid "" +":code:`num_gpus` indicates the **ratio** of GPU memory a client gets " +"assigned." 
+msgstr "" + +#: ../../source/how-to-run-simulations.rst:70 +msgid "Let's see a few examples:" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:89 +msgid "" +"While the :code:`client_resources` can be used to control the degree of " +"concurrency in your FL simulation, this does not stop you from running " +"dozens, hundreds or even thousands of clients in the same round and " +"having orders of magnitude more `dormant` (i.e. not participating in a " +"round) clients. Let's say you want to have 100 clients per round but your" +" system can only accommodate 8 clients concurrently. The " +":code:`VirtualClientEngine` will schedule 100 jobs to run (each " +"simulating a client sampled by the strategy) and then will execute them " +"in a resource-aware manner in batches of 8." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:91 +msgid "" +"To understand all the intricate details on how resources are used to " +"schedule FL clients and how to define custom resources, please take a " +"look at the `Ray documentation `_." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:94 +msgid "Simulation examples" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:96 +msgid "" +"A few ready-to-run complete examples for Flower simulation in " +"Tensorflow/Keras and PyTorch are provided in the `Flower repository " +"`_. You can run them on Google Colab too:" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:98 +msgid "" +"`Tensorflow/Keras Simulation " +"`_: 100 clients collaboratively train a MLP model on MNIST." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:99 +msgid "" +"`PyTorch Simulation `_: 100 clients collaboratively train a CNN model on " +"MNIST." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:104 +msgid "Multi-node Flower simulations" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:106 +msgid "" +"Flower's :code:`VirtualClientEngine` allows you to run FL simulations " +"across multiple compute nodes. 
Before starting your multi-node simulation" +" ensure that you:" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:108 +msgid "Have the same Python environment in all nodes." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:109 +msgid "Have a copy of your code (e.g. your entire repo) in all nodes." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:110 +msgid "" +"Have a copy of your dataset in all nodes (more about this in " +":ref:`simulation considerations `)" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:111 +msgid "" +"Pass :code:`ray_init_args={\"address\"=\"auto\"}` to `start_simulation " +"`_ so the " +":code:`VirtualClientEngine` attaches to a running Ray instance." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:112 +msgid "" +"Start Ray on you head node: on the terminal type :code:`ray start " +"--head`. This command will print a few lines, one of which indicates how " +"to attach other nodes to the head node." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:113 +msgid "" +"Attach other nodes to the head node: copy the command shown after " +"starting the head and execute it on terminal of a new node: for example " +":code:`ray start --address='192.168.1.132:6379'`" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:115 +msgid "" +"With all the above done, you can run your code from the head node as you " +"would if the simulation was running on a single node." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:117 +msgid "" +"Once your simulation is finished, if you'd like to dismantle your cluster" +" you simply need to run the command :code:`ray stop` in each node's " +"terminal (including the head node)." 
+msgstr "" + +#: ../../source/how-to-run-simulations.rst:120 +msgid "Multi-node simulation good-to-know" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:122 +msgid "" +"Here we list a few interesting functionality when running multi-node FL " +"simulations:" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:124 +msgid "" +"User :code:`ray status` to check all nodes connected to your head node as" +" well as the total resources available to the " +":code:`VirtualClientEngine`." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:126 +msgid "" +"When attaching a new node to the head, all its resources (i.e. all CPUs, " +"all GPUs) will be visible by the head node. This means that the " +":code:`VirtualClientEngine` can schedule as many `virtual` clients as " +"that node can possible run. In some settings you might want to exclude " +"certain resources from the simulation. You can do this by appending " +"`--num-cpus=` and/or `--num-" +"gpus=` in any :code:`ray start` command (including " +"when starting the head)" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:132 +msgid "Considerations for simulations" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:135 +msgid "" +"We are actively working on these fronts so to make it trivial to run any " +"FL workload with Flower simulation." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:138 +msgid "" +"The current VCE allows you to run Federated Learning workloads in " +"simulation mode whether you are prototyping simple scenarios on your " +"personal laptop or you want to train a complex FL pipeline across " +"multiple high-performance GPU nodes. While we add more capabilities to " +"the VCE, the points below highlight some of the considerations to keep in" +" mind when designing your FL pipeline with Flower. We also highlight a " +"couple of current limitations in our implementation." 
+msgstr "" + +#: ../../source/how-to-run-simulations.rst:141 +msgid "GPU resources" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:143 +msgid "" +"The VCE assigns a share of GPU memory to a client that specifies the key " +":code:`num_gpus` in :code:`client_resources`. This being said, Ray (used " +"internally by the VCE) is by default:" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:146 +msgid "" +"not aware of the total VRAM available on the GPUs. This means that if you" +" set :code:`num_gpus=0.5` and you have two GPUs in your system with " +"different (e.g. 32GB and 8GB) VRAM amounts, they both would run 2 clients" +" concurrently." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:147 +msgid "" +"not aware of other unrelated (i.e. not created by the VCE) workloads are " +"running on the GPU. Two takeaways from this are:" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:149 +msgid "" +"Your Flower server might need a GPU to evaluate the `global model` after " +"aggregation (by instance when making use of the `evaluate method `_)" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:150 +msgid "" +"If you want to run several independent Flower simulations on the same " +"machine you need to mask-out your GPUs with " +":code:`CUDA_VISIBLE_DEVICES=\"\"` when launching your " +"experiment." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:153 +msgid "" +"In addition, the GPU resource limits passed to :code:`client_resources` " +"are not `enforced` (i.e. they can be exceeded) which can result in the " +"situation of client using more VRAM than the ratio specified when " +"starting the simulation." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:156 +msgid "TensorFlow with GPUs" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:158 +msgid "" +"When `using a GPU with TensorFlow " +"`_ nearly your entire GPU memory of" +" all your GPUs visible to the process will be mapped. 
This is done by " +"TensorFlow for optimization purposes. However, in settings such as FL " +"simulations where we want to split the GPU into multiple `virtual` " +"clients, this is not a desirable mechanism. Luckily we can disable this " +"default behavior by `enabling memory growth " +"`_." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:160 +msgid "" +"This would need to be done in the main process (which is where the server" +" would run) and in each Actor created by the VCE. By means of " +":code:`actor_kwargs` we can pass the reserved key `\"on_actor_init_fn\"` " +"in order to specify a function to be executed upon actor initialization. " +"In this case, to enable GPU growth for TF workloads. It would look as " +"follows:" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:179 +msgid "" +"This is precisely the mechanism used in `Tensorflow/Keras Simulation " +"`_ example." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:183 +msgid "Multi-node setups" +msgstr "" + +#: ../../source/how-to-run-simulations.rst:185 +msgid "" +"The VCE does not currently offer a way to control on which node a " +"particular `virtual` client is executed. In other words, if more than a " +"single node have the resources needed by a client to run, then any of " +"those nodes could get the client workload scheduled onto. Later in the FL" +" process (i.e. in a different round) the same client could be executed by" +" a different node. Depending on how your clients access their datasets, " +"this might require either having a copy of all dataset partitions on all " +"nodes or a dataset serving mechanism (e.g. using nfs, a database) to " +"circumvent data duplication." +msgstr "" + +#: ../../source/how-to-run-simulations.rst:187 +msgid "" +"By definition virtual clients are `stateless` due to their ephemeral " +"nature. A client state can be implemented as part of the Flower client " +"class but users need to ensure this saved to persistent storage (e.g. 
a " +"database, disk) and that can be retrieve later by the same client " +"regardless on which node it is running from. This is related to the point" +" above also since, in some way, the client's dataset could be seen as a " +"type of `state`." +msgstr "" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:2 +msgid "Save and load model checkpoints" +msgstr "" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:4 +msgid "" +"Flower does not automatically save model updates on the server-side. This" +" how-to guide describes the steps to save (and load) model checkpoints in" +" Flower." +msgstr "" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:8 +msgid "Model checkpointing" +msgstr "" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:10 +msgid "" +"Model updates can be persisted on the server-side by customizing " +":code:`Strategy` methods. Implementing custom strategies is always an " +"option, but for many cases it may be more convenient to simply customize " +"an existing strategy. The following code example defines a new " +":code:`SaveModelStrategy` which customized the existing built-in " +":code:`FedAvg` strategy. In particular, it customizes " +":code:`aggregate_fit` by calling :code:`aggregate_fit` in the base class " +"(:code:`FedAvg`). It then continues to save returned (aggregated) weights" +" before it returns those aggregated weights to the caller (i.e., the " +"server):" +msgstr "" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:47 +msgid "Save and load PyTorch checkpoints" +msgstr "" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:49 +msgid "" +"Similar to the previous example but with a few extra steps, we'll show " +"how to store a PyTorch checkpoint we'll use the ``torch.save`` function. 
" +"Firstly, ``aggregate_fit`` returns a ``Parameters`` object that has to be" +" transformed into a list of NumPy ``ndarray``'s, then those are " +"transformed into the PyTorch ``state_dict`` following the ``OrderedDict``" +" class structure." +msgstr "" + +#: ../../source/how-to-save-and-load-model-checkpoints.rst:85 +msgid "" +"To load your progress, you simply append the following lines to your " +"code. Note that this will iterate over all saved checkpoints and load the" +" latest one:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:2 +msgid "Upgrade to Flower 1.0" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:4 +msgid "" +"Flower 1.0 is here. Along with new features, Flower 1.0 provides a stable" +" foundation for future growth. Compared to Flower 0.19 (and other 0.x " +"series releases), there are a few breaking changes that make it necessary" +" to change the code of existing 0.x-series projects." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:8 +msgid "Install update" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:10 +msgid "" +"Here's how to update an existing installation to Flower 1.0 using either " +"pip or Poetry:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:12 +msgid "pip: add ``-U`` when installing." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:14 +msgid "" +"``python -m pip install -U flwr`` (when using ``start_server`` and " +"``start_client``)" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:15 +msgid "" +"``python -m pip install -U flwr[simulation]`` (when using " +"``start_simulation``)" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:17 +msgid "" +"Poetry: update the ``flwr`` dependency in ``pyproject.toml`` and then " +"reinstall (don't forget to delete ``poetry.lock`` via ``rm poetry.lock`` " +"before running ``poetry install``)." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:19 +msgid "``flwr = \"^1.0.0\"`` (when using ``start_server`` and ``start_client``)" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:20 +msgid "" +"``flwr = { version = \"^1.0.0\", extras = [\"simulation\"] }`` (when " +"using ``start_simulation``)" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:24 +msgid "Required changes" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:26 +msgid "The following breaking changes require manual updates." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:29 +msgid "General" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:31 +msgid "" +"Pass all arguments as keyword arguments (not as positional arguments). " +"Here's an example:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:33 +msgid "" +"Flower 0.19 (positional arguments): ``start_client(\"127.0.0.1:8080\", " +"FlowerClient())``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:34 +msgid "" +"Flower 1.0 (keyword arguments): " +"``start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:39 +msgid "" +"Subclasses of ``NumPyClient``: change ``def get_parameters(self):``` to " +"``def get_parameters(self, config):``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:40 +msgid "" +"Subclasses of ``Client``: change ``def get_parameters(self):``` to ``def " +"get_parameters(self, ins: GetParametersIns):``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:43 +msgid "Strategies / ``start_server`` / ``start_simulation``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:45 +msgid "" +"Pass ``ServerConfig`` (instead of a dictionary) to ``start_server`` and " +"``start_simulation``. 
Here's an example:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:47 +msgid "" +"Flower 0.19: ``start_server(..., config={\"num_rounds\": 3, " +"\"round_timeout\": 600.0}, ...)``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:48 +msgid "" +"Flower 1.0: ``start_server(..., " +"config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " +"...)``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:50 +msgid "" +"Replace ``num_rounds=1`` in ``start_simulation`` with the new " +"``config=ServerConfig(...)`` (see previous item)" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:51 +msgid "" +"Remove ``force_final_distributed_eval`` parameter from calls to " +"``start_server``. Distributed evaluation on all clients can be enabled by" +" configuring the strategy to sample all clients for evaluation after the " +"last round of training." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:52 +msgid "Rename parameter/ndarray conversion functions:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:54 +msgid "``parameters_to_weights`` --> ``parameters_to_ndarrays``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:55 +msgid "``weights_to_parameters`` --> ``ndarrays_to_parameters``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:57 +msgid "" +"Strategy initialization: if the strategy relies on the default values for" +" ``fraction_fit`` and ``fraction_evaluate``, set ``fraction_fit`` and " +"``fraction_evaluate`` manually to ``0.1``. Projects that do not manually " +"create a strategy (by calling ``start_server`` or ``start_simulation`` " +"without passing a strategy instance) should now manually initialize " +"FedAvg with ``fraction_fit`` and ``fraction_evaluate`` set to ``0.1``." 
+msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:58 +msgid "Rename built-in strategy parameters (e.g., ``FedAvg``):" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:60 +msgid "``fraction_eval`` --> ``fraction_evaluate``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:61 +msgid "``min_eval_clients`` --> ``min_evaluate_clients``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:62 +msgid "``eval_fn`` --> ``evaluate_fn``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:64 +msgid "" +"Rename ``rnd`` to ``server_round``. This impacts multiple methods and " +"functions, for example, ``configure_fit``, ``aggregate_fit``, " +"``configure_evaluate``, ``aggregate_evaluate``, and ``evaluate_fn``." +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:65 +msgid "Add ``server_round`` and ``config`` to ``evaluate_fn``:" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:67 +msgid "" +"Flower 0.19: ``def evaluate(parameters: NDArrays) -> " +"Optional[Tuple[float, Dict[str, Scalar]]]:``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:68 +msgid "" +"Flower 1.0: ``def evaluate(server_round: int, parameters: NDArrays, " +"config: Dict[str, Scalar]) -> Optional[Tuple[float, Dict[str, " +"Scalar]]]:``" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:71 +msgid "Custom strategies" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:73 +msgid "" +"The type of parameter ``failures`` has changed from " +"``List[BaseException]`` to ``List[Union[Tuple[ClientProxy, FitRes], " +"BaseException]]`` (in ``aggregate_fit``) and " +"``List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]]`` (in " +"``aggregate_evaluate``)" +msgstr "" + +#: ../../source/how-to-upgrade-to-flower-1.0.rst:74 +msgid "" +"The ``Strategy`` method ``evaluate`` now receives the current round of " +"federated learning/evaluation as the first parameter:" +msgstr "" + +#: 
../../source/how-to-upgrade-to-flower-1.0.rst:76
+msgid ""
+"Flower 0.19: ``def evaluate(self, parameters: Parameters) -> "
+"Optional[Tuple[float, Dict[str, Scalar]]]:``"
+msgstr ""
+
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:77
+msgid ""
+"Flower 1.0: ``def evaluate(self, server_round: int, parameters: "
+"Parameters) -> Optional[Tuple[float, Dict[str, Scalar]]]:``"
+msgstr ""
+
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:80
+msgid "Optional improvements"
+msgstr ""
+
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:82
+msgid ""
+"Along with the necessary changes above, there are a number of potential "
+"improvements that just became possible:"
+msgstr ""
+
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:84
+msgid ""
+"Remove \"placeholder\" methods from subclasses of ``Client`` or "
+"``NumPyClient``. If you, for example, use server-side evaluation, then "
+"empty placeholder implementations of ``evaluate`` are no longer necessary."
+msgstr ""
+
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:85
+msgid ""
+"Configure the round timeout via ``start_simulation``: "
+"``start_simulation(..., config=flwr.server.ServerConfig(num_rounds=3, "
+"round_timeout=600.0), ...)``"
+msgstr ""
+
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:89
+msgid "Further help"
+msgstr ""
+
+#: ../../source/how-to-upgrade-to-flower-1.0.rst:91
+msgid ""
+"Most official `Flower code examples "
+"`_ are already updated"
+" to Flower 1.0; they can serve as a reference for using the Flower 1.0 "
+"API. If there are further questions, `join the Flower Slack "
+"`_ and use the channel ``#questions``."
+msgstr ""
+
+#: ../../source/how-to-use-strategies.rst:2
+msgid "Use strategies"
+msgstr ""
+
+#: ../../source/how-to-use-strategies.rst:4
+msgid ""
+"Flower allows full customization of the learning process through the "
+":code:`Strategy` abstraction. A number of built-in strategies are "
+"provided in the core framework."
+msgstr "" + +#: ../../source/how-to-use-strategies.rst:6 +msgid "" +"There are three ways to customize the way Flower orchestrates the " +"learning process on the server side:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:8 +msgid "Use an existing strategy, for example, :code:`FedAvg`" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:9 +#: ../../source/how-to-use-strategies.rst:40 +msgid "Customize an existing strategy with callback functions" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:10 +#: ../../source/how-to-use-strategies.rst:87 +msgid "Implement a novel strategy" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:14 +msgid "Use an existing strategy" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:16 +msgid "" +"Flower comes with a number of popular federated learning strategies " +"built-in. A built-in strategy can be instantiated as follows:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:25 +msgid "" +"This creates a strategy with all parameters left at their default values " +"and passes it to the :code:`start_server` function. It is usually " +"recommended to adjust a few parameters during instantiation:" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:42 +msgid "" +"Existing strategies provide several ways to customize their behaviour. " +"Callback functions allow strategies to call user-provided code during " +"execution." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:45 +msgid "Configuring client fit and client evaluate" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:47 +msgid "" +"The server can pass new configuration values to the client each round by " +"providing a function to :code:`on_fit_config_fn`. The provided function " +"will be called by the strategy and must return a dictionary of " +"configuration key values pairs that will be sent to the client. 
It must " +"return a dictionary of arbitraty configuration values :code:`client.fit`" +" and :code:`client.evaluate` functions during each round of federated " +"learning." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:75 +msgid "" +"The :code:`on_fit_config_fn` can be used to pass arbitrary configuration " +"values from server to client, and poetentially change these values each " +"round, for example, to adjust the learning rate. The client will receive " +"the dictionary returned by the :code:`on_fit_config_fn` in its own " +":code:`client.fit()` function." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:78 +msgid "" +"Similar to :code:`on_fit_config_fn`, there is also " +":code:`on_evaluate_config_fn` to customize the configuration sent to " +":code:`client.evaluate()`" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:81 +msgid "Configuring server-side evaluation" +msgstr "" + +#: ../../source/how-to-use-strategies.rst:83 +msgid "" +"Server-side evaluation can be enabled by passing an evaluation function " +"to :code:`evaluate_fn`." +msgstr "" + +#: ../../source/how-to-use-strategies.rst:89 +msgid "" +"Writing a fully custom strategy is a bit more involved, but it provides " +"the most flexibility. Read the `Implementing Strategies `_ guide to learn more." 
+msgstr "" + +#: ../../source/index.rst:34 +msgid "Tutorial" +msgstr "" + +#: ../../source/index.rst:44 +msgid "Quickstart tutorials" +msgstr "" + +#: ../../source/index.rst:75 ../../source/index.rst:79 +msgid "How-to guides" +msgstr "" + +#: ../../source/index.rst:95 +msgid "Legacy example guides" +msgstr "" + +#: ../../source/index.rst:106 ../../source/index.rst:110 +msgid "Explanations" +msgstr "" + +#: ../../source/index.rst:122 +msgid "API reference" +msgstr "" + +#: ../../source/index.rst:129 +msgid "Reference docs" +msgstr "" + +#: ../../source/index.rst:145 +msgid "Contributor tutorials" +msgstr "" + +#: ../../source/index.rst:152 +msgid "Contributor how-to guides" +msgstr "" + +#: ../../source/index.rst:164 +msgid "Contributor explanations" +msgstr "" + +#: ../../source/index.rst:170 +msgid "Contributor references" +msgstr "" + +#: ../../source/index.rst:-1 +msgid "" +"Check out the documentation of the main Flower Framework enabling easy " +"Python development for Federated Learning." +msgstr "" + +#: ../../source/index.rst:2 +msgid "Flower Framework Documentation" +msgstr "" + +#: ../../source/index.rst:7 +msgid "" +"Welcome to Flower's documentation. `Flower `_ is a " +"friendly federated learning framework." +msgstr "" + +#: ../../source/index.rst:11 +msgid "Join the Flower Community" +msgstr "" + +#: ../../source/index.rst:13 +msgid "" +"The Flower Community is growing quickly - we're a friendly group of " +"researchers, engineers, students, professionals, academics, and other " +"enthusiasts." +msgstr "" + +#: ../../source/index.rst:15 +msgid "Join us on Slack" +msgstr "" + +#: ../../source/index.rst:23 +msgid "Flower Framework" +msgstr "" + +#: ../../source/index.rst:25 +msgid "" +"The user guide is targeted at researchers and developers who want to use " +"Flower to bring existing machine learning workloads into a federated " +"setting. One of Flower's design goals was to make this simple. Read on to" +" learn more." 
+msgstr "" + +#: ../../source/index.rst:30 +msgid "Tutorials" +msgstr "" + +#: ../../source/index.rst:32 +msgid "" +"A learning-oriented series of federated learning tutorials, the best " +"place to start." +msgstr "" + +#: ../../source/index.rst:62 +msgid "" +"QUICKSTART TUTORIALS: :doc:`PyTorch ` | " +":doc:`TensorFlow ` | :doc:`🤗 Transformers" +" ` | :doc:`JAX ` | :doc:`Pandas ` | :doc:`fastai " +"` | :doc:`PyTorch Lightning ` | :doc:`MXNet `" +" | :doc:`scikit-learn ` | :doc:`XGBoost " +"` | :doc:`Android ` | :doc:`iOS `" +msgstr "" + +#: ../../source/index.rst:64 +msgid "We also made video tutorials for PyTorch:" +msgstr "" + +#: ../../source/index.rst:69 +msgid "And TensorFlow:" +msgstr "" + +#: ../../source/index.rst:77 +msgid "" +"Problem-oriented how-to guides show step-by-step how to achieve a " +"specific goal." +msgstr "" + +#: ../../source/index.rst:108 +msgid "" +"Understanding-oriented concept guides explain and discuss key topics and " +"underlying ideas behind Flower and collaborative AI." +msgstr "" + +#: ../../source/index.rst:118 +msgid "References" +msgstr "" + +#: ../../source/index.rst:120 +msgid "Information-oriented API reference and other reference material." +msgstr "" + +#: ../../source/index.rst:140 +msgid "Contributor docs" +msgstr "" + +#: ../../source/index.rst:142 +msgid "" +"The Flower community welcomes contributions. The following docs are " +"intended to help along the way." +msgstr "" + +#: ../../source/ref-api-cli.rst:2 +msgid "Flower CLI reference" +msgstr "" + +#: ../../source/ref-api-cli.rst:7 +msgid "flower-server" +msgstr "" + +#: ../../source/ref-api-cli.rst:17 +msgid "flower-driver-api" +msgstr "" + +#: ../../source/ref-api-cli.rst:27 +msgid "flower-fleet-api" +msgstr "" + +#: ../../source/ref-api-flwr.rst:2 +msgid "flwr (Python API reference)" +msgstr "" + +#: ../../source/ref-api-flwr.rst:8 +msgid "client" +msgstr "" + +#: flwr.client:1 of +msgid "Flower client." 
+msgstr "" + +#: flwr.client.client.Client:1 of +msgid "Abstract base class for Flower clients." +msgstr "" + +#: flwr.client.client.Client.evaluate:1 +#: flwr.client.numpy_client.NumPyClient.evaluate:1 of +msgid "Evaluate the provided parameters using the locally held dataset." +msgstr "" + +#: flwr.client.app.start_client flwr.client.app.start_numpy_client +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.server.app.start_server flwr.server.strategy.bulyan.Bulyan.__init__ +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__ +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__ +#: flwr.server.strategy.fedadam.FedAdam.__init__ +#: flwr.server.strategy.fedavg.FedAvg.__init__ +#: flwr.server.strategy.fedavgm.FedAvgM.__init__ +#: flwr.server.strategy.fedopt.FedOpt.__init__ +#: flwr.server.strategy.fedprox.FedProx.__init__ +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__ +#: flwr.server.strategy.fedyogi.FedYogi.__init__ +#: flwr.server.strategy.krum.Krum.__init__ +#: flwr.server.strategy.qfedavg.QFedAvg.__init__ +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.simulation.app.start_simulation of +msgid "Parameters" +msgstr "" + +#: 
flwr.client.client.Client.evaluate:3 of +msgid "" +"The evaluation instructions containing (global) model parameters received" +" from the server and a dictionary of configuration values used to " +"customize the local evaluation process." +msgstr "" + +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.server.app.start_server +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.simulation.app.start_simulation of +msgid "Returns" +msgstr "" + +#: flwr.client.client.Client.evaluate:8 of +msgid "" +"The evaluation result containing the loss on the local dataset and other " +"details such as the number of local data examples used for evaluation." 
+msgstr "" + +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.server.app.start_server +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.simulation.app.start_simulation of +msgid "Return type" +msgstr "" + +#: flwr.client.client.Client.fit:1 of +msgid "Refine the provided parameters using the locally held dataset." +msgstr "" + +#: flwr.client.client.Client.fit:3 of +msgid "" +"The training instructions containing (global) model parameters received " +"from the server and a dictionary of configuration values used to " +"customize the local training process." +msgstr "" + +#: flwr.client.client.Client.fit:8 of +msgid "" +"The training result containing updated parameters and other details such " +"as the number of local training examples used for training." +msgstr "" + +#: flwr.client.client.Client.get_parameters:1 +#: flwr.client.numpy_client.NumPyClient.get_parameters:1 of +msgid "Return the current local model parameters." +msgstr "" + +#: flwr.client.client.Client.get_parameters:3 of +msgid "" +"The get parameters instructions received from the server containing a " +"dictionary of configuration values." +msgstr "" + +#: flwr.client.client.Client.get_parameters:7 of +msgid "The current local model parameters." 
+msgstr "" + +#: flwr.client.client.Client.get_properties:1 of +msgid "Return set of client's properties." +msgstr "" + +#: flwr.client.client.Client.get_properties:3 of +msgid "" +"The get properties instructions received from the server containing a " +"dictionary of configuration values." +msgstr "" + +#: flwr.client.client.Client.get_properties:7 of +msgid "The current client properties." +msgstr "" + +#: flwr.client.client.Client.to_client:1 of +msgid "Return client (itself)." +msgstr "" + +#: ../../source/ref-api-flwr.rst:24 +msgid "start_client" +msgstr "" + +#: flwr.client.app.start_client:1 of +msgid "Start a Flower client node which connects to a Flower server." +msgstr "" + +#: flwr.client.app.start_client:3 flwr.client.app.start_numpy_client:3 of +msgid "" +"The IPv4 or IPv6 address of the server. If the Flower server runs on the " +"same machine on port 8080, then `server_address` would be " +"`\"[::]:8080\"`." +msgstr "" + +#: flwr.client.app.start_client:7 of +msgid "..." +msgstr "" + +#: flwr.client.app.start_client:9 of +msgid "A callable that instantiates a Client. (default: None)" +msgstr "" + +#: flwr.client.app.start_client:11 of +msgid "" +"An implementation of the abstract base class `flwr.client.Client` " +"(default: None)" +msgstr "" + +#: flwr.client.app.start_client:14 flwr.client.app.start_numpy_client:9 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" server. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower server needs to be started with the same value (see " +"`flwr.server.start_server`), otherwise it will not know about the " +"increased limit and block larger messages." +msgstr "" + +#: flwr.client.app.start_client:21 flwr.client.app.start_numpy_client:16 of +msgid "" +"The PEM-encoded root certificates as a byte string or a path string. 
If " +"provided, a secure connection using the certificates will be established " +"to an SSL-enabled Flower server." +msgstr "" + +#: flwr.client.app.start_client:25 flwr.client.app.start_numpy_client:20 of +msgid "" +"Configure the transport layer. Allowed values: - 'grpc-bidi': gRPC, " +"bidirectional streaming - 'grpc-rere': gRPC, request-response " +"(experimental) - 'rest': HTTP (experimental)" +msgstr "" + +#: flwr.client.app.start_client:32 flwr.client.app.start_numpy_client:27 +#: flwr.server.app.start_server:41 of +msgid "Examples" +msgstr "" + +#: flwr.client.app.start_client:33 of +msgid "Starting a gRPC client with an insecure server connection:" +msgstr "" + +#: flwr.client.app.start_client:43 flwr.client.app.start_numpy_client:35 of +msgid "Starting an SSL-enabled gRPC client:" +msgstr "" + +#: ../../source/ref-api-flwr.rst:32 +msgid "NumPyClient" +msgstr "" + +#: flwr.client.numpy_client.NumPyClient:1 of +msgid "Abstract base class for Flower clients using NumPy." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:3 +#: flwr.client.numpy_client.NumPyClient.fit:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:5 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:8 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:5 +#: flwr.server.strategy.strategy.Strategy.configure_fit:5 +#: flwr.server.strategy.strategy.Strategy.evaluate:8 of +msgid "The current (global) model parameters." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:5 of +msgid "" +"Configuration parameters which allow the server to influence evaluation " +"on the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to influence the number of examples " +"used for evaluation." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"* **loss** (*float*) -- The evaluation loss of the model on the local " +"dataset. 
* **num_examples** (*int*) -- The number of examples used for " +"evaluation. * **metrics** (*Dict[str, Scalar]*) -- A dictionary mapping " +"arbitrary string keys to values of type bool, bytes, float, int, or " +"str. It can be used to communicate arbitrary values back to the server." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:11 of +msgid "" +"**loss** (*float*) -- The evaluation loss of the model on the local " +"dataset." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:12 of +msgid "**num_examples** (*int*) -- The number of examples used for evaluation." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:13 +#: flwr.client.numpy_client.NumPyClient.fit:13 of +msgid "" +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can be " +"used to communicate arbitrary values back to the server." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.evaluate:19 of +msgid "" +"The previous return type format (int, float, float) and the extended " +"format (int, float, float, Dict[str, Scalar]) have been deprecated and " +"removed since Flower 0.19." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:1 of +msgid "Train the provided parameters using the locally held dataset." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:5 of +msgid "" +"Configuration parameters which allow the server to influence training on " +"the client. It can be used to communicate arbitrary values from the " +"server to the client, for example, to set the number of (local) training " +"epochs." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "" +"* **parameters** (*NDArrays*) -- The locally updated model parameters. * " +"**num_examples** (*int*) -- The number of examples used for training. 
* " +"**metrics** (*Dict[str, Scalar]*) -- A dictionary mapping arbitrary " +"string keys to values of type bool, bytes, float, int, or str. It can " +"be used to communicate arbitrary values back to the server." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:11 of +msgid "**parameters** (*NDArrays*) -- The locally updated model parameters." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.fit:12 of +msgid "**num_examples** (*int*) -- The number of examples used for training." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_parameters:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which parameters are needed along with some Scalar " +"attributes." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_parameters:8 of +msgid "**parameters** -- The local model parameters as a list of NumPy ndarrays." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_properties:1 of +msgid "Return a client's set of properties." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_properties:3 of +msgid "" +"Configuration parameters requested by the server. This can be used to " +"tell the client which properties are needed along with some Scalar " +"attributes." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.get_properties:8 of +msgid "" +"**properties** -- A dictionary mapping arbitrary string keys to values of" +" type bool, bytes, float, int, or str. It can be used to communicate " +"arbitrary property values back to the server." +msgstr "" + +#: flwr.client.numpy_client.NumPyClient.to_client:1 of +msgid "Convert to object to Client type and return it." +msgstr "" + +#: ../../source/ref-api-flwr.rst:41 +msgid "start_numpy_client" +msgstr "" + +#: flwr.client.app.start_numpy_client:1 of +msgid "Start a Flower NumPyClient which connects to a gRPC server." 
+msgstr "" + +#: flwr.client.app.start_numpy_client:7 of +msgid "An implementation of the abstract base class `flwr.client.NumPyClient`." +msgstr "" + +#: flwr.client.app.start_numpy_client:28 of +msgid "Starting a client with an insecure server connection:" +msgstr "" + +#: ../../source/ref-api-flwr.rst:49 +msgid "start_simulation" +msgstr "" + +#: flwr.simulation.app.start_simulation:1 of +msgid "Start a Ray-based Flower simulation server." +msgstr "" + +#: flwr.simulation.app.start_simulation:3 of +msgid "" +"A function creating client instances. The function must take a single " +"`str` argument called `cid`. It should return a single client instance of" +" type Client. Note that the created client instances are ephemeral and " +"will often be destroyed after a single method invocation. Since client " +"instances are not long-lived, they should not attempt to carry state over" +" method invocations. Any state required by the instance (model, dataset, " +"hyperparameters, ...) should be (re-)created in either the call to " +"`client_fn` or the call to any of the client methods (e.g., load " +"evaluation data in the `evaluate` method itself)." +msgstr "" + +#: flwr.simulation.app.start_simulation:13 of +msgid "" +"The total number of clients in this simulation. This must be set if " +"`clients_ids` is not set and vice-versa." +msgstr "" + +#: flwr.simulation.app.start_simulation:16 of +msgid "" +"List `client_id`s for each client. This is only required if `num_clients`" +" is not set. Setting both `num_clients` and `clients_ids` with " +"`len(clients_ids)` not equal to `num_clients` generates an error." +msgstr "" + +#: flwr.simulation.app.start_simulation:20 of +msgid "" +"\"num_gpus\": 0.0}` CPU and GPU resources for a single client. Supported " +"keys are `num_cpus` and `num_gpus`. To understand the GPU utilization " +"caused by `num_gpus`, as well as using custom resources, please consult " +"the Ray documentation." 
+msgstr "" + +#: flwr.simulation.app.start_simulation:25 of +msgid "" +"An implementation of the abstract base class `flwr.server.Server`. If no " +"instance is provided, then `start_server` will create one." +msgstr "" + +#: flwr.server.app.start_server:9 flwr.simulation.app.start_simulation:28 of +msgid "" +"Currently supported values are `num_rounds` (int, default: 1) and " +"`round_timeout` in seconds (float, default: None)." +msgstr "" + +#: flwr.simulation.app.start_simulation:31 of +msgid "" +"An implementation of the abstract base class `flwr.server.Strategy`. If " +"no strategy is provided, then `start_server` will use " +"`flwr.server.strategy.FedAvg`." +msgstr "" + +#: flwr.simulation.app.start_simulation:35 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_simulation` will use " +"`flwr.server.client_manager.SimpleClientManager`." +msgstr "" + +#: flwr.simulation.app.start_simulation:39 of +msgid "" +"Optional dictionary containing arguments for the call to `ray.init`. If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args: { \"ignore_reinit_error\": True, " +"\"include_dashboard\": False } An empty dictionary can be used " +"(ray_init_args={}) to prevent any arguments from being passed to " +"ray.init." +msgstr "" + +#: flwr.simulation.app.start_simulation:39 of +msgid "" +"Optional dictionary containing arguments for the call to `ray.init`. If " +"ray_init_args is None (the default), Ray will be initialized with the " +"following default args:" +msgstr "" + +#: flwr.simulation.app.start_simulation:43 of +msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +msgstr "" + +#: flwr.simulation.app.start_simulation:45 of +msgid "" +"An empty dictionary can be used (ray_init_args={}) to prevent any " +"arguments from being passed to ray.init." 
+msgstr "" + +#: flwr.simulation.app.start_simulation:48 of +msgid "" +"Set to True to prevent `ray.shutdown()` in case " +"`ray.is_initialized()=True`." +msgstr "" + +#: flwr.simulation.app.start_simulation:50 of +msgid "" +"Optionally specify the type of actor to use. The actor object, which " +"persists throughout the simulation, will be the process in charge of " +"running the clients' jobs (i.e. their `fit()` method)." +msgstr "" + +#: flwr.simulation.app.start_simulation:54 of +msgid "" +"If you want to create your own Actor classes, you might need to pass some" +" input argument. You can use this dictionary for such purpose." +msgstr "" + +#: flwr.simulation.app.start_simulation:57 of +msgid "" +"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " +"the VCE to choose in which node the actor is placed. If you are an " +"advanced user needed more control you can use lower-level scheduling " +"strategies to pin actors to specific compute nodes (e.g. via " +"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." +" For all details, please refer to the Ray documentation: " +"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" +msgstr "" + +#: flwr.simulation.app.start_simulation:66 of +msgid "**hist** -- Object containing metrics from training." +msgstr "" + +#: ../../source/ref-api-flwr.rst:57 +msgid "server" +msgstr "" + +#: flwr.server:1 of +msgid "Flower server." +msgstr "" + +#: ../../source/ref-api-flwr.rst:65 +msgid "server.start_server" +msgstr "" + +#: flwr.server.app.start_server:1 of +msgid "Start a Flower server using the gRPC transport layer." +msgstr "" + +#: flwr.server.app.start_server:3 of +msgid "The IPv4 or IPv6 address of the server. Defaults to `\"[::]:8080\"`." +msgstr "" + +#: flwr.server.app.start_server:5 of +msgid "" +"A server implementation, either `flwr.server.Server` or a subclass " +"thereof. If no instance is provided, then `start_server` will create one." 
+msgstr "" + +#: flwr.server.app.start_server:12 of +msgid "" +"An implementation of the abstract base class " +"`flwr.server.strategy.Strategy`. If no strategy is provided, then " +"`start_server` will use `flwr.server.strategy.FedAvg`." +msgstr "" + +#: flwr.server.app.start_server:16 of +msgid "" +"An implementation of the abstract base class `flwr.server.ClientManager`." +" If no implementation is provided, then `start_server` will use " +"`flwr.server.client_manager.SimpleClientManager`." +msgstr "" + +#: flwr.server.app.start_server:21 of +msgid "" +"The maximum length of gRPC messages that can be exchanged with the Flower" +" clients. The default should be sufficient for most models. Users who " +"train very large models might need to increase this value. Note that the " +"Flower clients need to be started with the same value (see " +"`flwr.client.start_client`), otherwise clients will not know about the " +"increased limit and block larger messages." +msgstr "" + +#: flwr.server.app.start_server:28 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order: * CA certificate. * " +"server certificate. * server private key." +msgstr "" + +#: flwr.server.app.start_server:28 of +msgid "" +"Tuple containing root certificate, server certificate, and private key to" +" start a secure SSL-enabled server. The tuple is expected to have three " +"bytes elements in the following order:" +msgstr "" + +#: flwr.server.app.start_server:32 of +msgid "CA certificate." +msgstr "" + +#: flwr.server.app.start_server:33 of +msgid "server certificate." +msgstr "" + +#: flwr.server.app.start_server:34 of +msgid "server private key." +msgstr "" + +#: flwr.server.app.start_server:37 of +msgid "**hist** -- Object containing training and evaluation metrics." 
+msgstr "" + +#: flwr.server.app.start_server:42 of +msgid "Starting an insecure server:" +msgstr "" + +#: flwr.server.app.start_server:46 of +msgid "Starting an SSL-enabled server:" +msgstr "" + +#: ../../source/ref-api-flwr.rst:73 +msgid "server.strategy" +msgstr "" + +#: flwr.server.strategy:1 of +msgid "Contains the strategy abstraction and different implementations." +msgstr "" + +#: ../../source/ref-api-flwr.rst:81 +msgid "server.strategy.Strategy" +msgstr "" + +#: flwr.server.strategy.strategy.Strategy:1 of +msgid "Abstract base class for server strategy implementations." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:1 of +msgid "Aggregate evaluation results." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:3 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:6 +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:3 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:3 +#: flwr.server.strategy.strategy.Strategy.configure_fit:3 +#: flwr.server.strategy.strategy.Strategy.evaluate:6 of +msgid "The current round of federated learning." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes` constitutes a successful update from " +"one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:13 +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:13 of +msgid "Exceptions that occurred while the server was waiting for client updates." 
+msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate:16 of +msgid "" +"**aggregation_result** -- The aggregated evaluation result. Aggregation " +"typically uses some variant of a weighted average." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:1 of +msgid "Aggregate training results." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:5 of +msgid "" +"Successful updates from the previously selected and configured clients. " +"Each pair of `(ClientProxy, FitRes)` constitutes a successful update from" +" one of the previously selected clients. Not that not all previously " +"selected clients are necessarily included in this list: a client might " +"drop out and not submit a result. For each client that did not submit an " +"update, there should be an `Exception` in `failures`." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.aggregate_fit:17 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the new global model parameters (i.e., it will replace the " +"previous parameters with the ones returned from this method). If `None` " +"is returned (e.g., because there were only failures and no viable " +"results) then the server will no update the previous model parameters, " +"the updates received in this round are discarded, and the global model " +"parameters remain the same." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.configure_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_evaluate:1 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:1 of +msgid "Configure the next round of evaluation." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:7 +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:7 +#: flwr.server.strategy.strategy.Strategy.configure_fit:7 +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:3 of +msgid "The client manager which holds all currently connected clients." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:10 +#: flwr.server.strategy.strategy.Strategy.configure_evaluate:10 of +msgid "" +"**evaluate_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `EvaluateIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated evaluation." +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.configure_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.configure_fit:1 +#: flwr.server.strategy.fedprox.FedProx.configure_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.configure_fit:1 +#: flwr.server.strategy.strategy.Strategy.configure_fit:1 of +msgid "Configure the next round of training." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:13 +#: flwr.server.strategy.strategy.Strategy.configure_fit:10 of +msgid "" +"**fit_configuration** -- A list of tuples. Each tuple in the list " +"identifies a `ClientProxy` and the `FitIns` for this particular " +"`ClientProxy`. If a particular `ClientProxy` is not included in this " +"list, it means that this `ClientProxy` will not participate in the next " +"round of federated learning." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.evaluate:1 of +msgid "Evaluate the current model parameters." 
+msgstr "" + +#: flwr.server.strategy.strategy.Strategy.evaluate:3 of +msgid "" +"This function can be used to perform centralized (i.e., server-side) " +"evaluation of model parameters." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.evaluate:11 of +msgid "" +"**evaluation_result** -- The evaluation result, usually a Tuple " +"containing loss and a dictionary containing task-specific metrics (e.g., " +"accuracy)." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:1 of +msgid "Initialize the (global) model parameters." +msgstr "" + +#: flwr.server.strategy.strategy.Strategy.initialize_parameters:6 of +msgid "" +"**parameters** -- If parameters are returned, then the server will treat " +"these as the initial global model parameters." +msgstr "" + +#: ../../source/ref-api-flwr.rst:90 +msgid "server.strategy.FedAvg" +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg:1 of +msgid "Configurable FedAvg strategy implementation." +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:1 +#: flwr.server.strategy.fedavg.FedAvg.__init__:1 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:1 of +msgid "Federated Averaging strategy." +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:3 +#: flwr.server.strategy.fedavg.FedAvg.__init__:3 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:3 of +msgid "Implementation based on https://arxiv.org/abs/1602.05629" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:5 +#: flwr.server.strategy.fedavg.FedAvg.__init__:5 +#: flwr.server.strategy.fedprox.FedProx.__init__:37 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:5 of +msgid "" +"Fraction of clients used during training. In case `min_fit_clients` is " +"larger than `fraction_fit * available_clients`, `min_fit_clients` will " +"still be sampled. Defaults to 1.0." 
+msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:9 +#: flwr.server.strategy.fedavg.FedAvg.__init__:9 +#: flwr.server.strategy.fedprox.FedProx.__init__:41 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:9 of +msgid "" +"Fraction of clients used during validation. In case " +"`min_evaluate_clients` is larger than `fraction_evaluate * " +"available_clients`, `min_evaluate_clients` will still be sampled. " +"Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:9 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:13 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:9 +#: flwr.server.strategy.fedadam.FedAdam.__init__:9 +#: flwr.server.strategy.fedavg.FedAvg.__init__:13 +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:9 +#: flwr.server.strategy.fedopt.FedOpt.__init__:9 +#: flwr.server.strategy.fedprox.FedProx.__init__:45 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:7 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:9 +#: flwr.server.strategy.krum.Krum.__init__:7 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:13 of +msgid "Minimum number of clients used during training. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:11 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:15 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:11 +#: flwr.server.strategy.fedadam.FedAdam.__init__:11 +#: flwr.server.strategy.fedavg.FedAvg.__init__:15 +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:11 +#: flwr.server.strategy.fedopt.FedOpt.__init__:11 +#: flwr.server.strategy.fedprox.FedProx.__init__:47 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:9 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:11 +#: flwr.server.strategy.krum.Krum.__init__:9 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:15 of +msgid "Minimum number of clients used during validation. Defaults to 2." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:13 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:17 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:13 +#: flwr.server.strategy.fedadam.FedAdam.__init__:13 +#: flwr.server.strategy.fedavg.FedAvg.__init__:17 +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:13 +#: flwr.server.strategy.fedopt.FedOpt.__init__:13 +#: flwr.server.strategy.fedprox.FedProx.__init__:49 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:11 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:13 +#: flwr.server.strategy.krum.Krum.__init__:11 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:17 of +msgid "Minimum number of total clients in the system. Defaults to 2." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:17 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:19 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:15 +#: flwr.server.strategy.fedadam.FedAdam.__init__:15 +#: flwr.server.strategy.fedavg.FedAvg.__init__:19 +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:15 +#: flwr.server.strategy.fedopt.FedOpt.__init__:15 +#: flwr.server.strategy.fedprox.FedProx.__init__:51 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:13 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:15 +#: flwr.server.strategy.krum.Krum.__init__:18 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:19 of +msgid "Optional function used for validation. Defaults to None." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:19 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:17 +#: flwr.server.strategy.fedadam.FedAdam.__init__:17 +#: flwr.server.strategy.fedavg.FedAvg.__init__:21 +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:17 +#: flwr.server.strategy.fedopt.FedOpt.__init__:17 +#: flwr.server.strategy.fedprox.FedProx.__init__:53 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:15 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:17 +#: flwr.server.strategy.krum.Krum.__init__:20 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:21 of +msgid "Function used to configure training. Defaults to None." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:21 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:23 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:19 +#: flwr.server.strategy.fedadam.FedAdam.__init__:19 +#: flwr.server.strategy.fedavg.FedAvg.__init__:23 +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:19 +#: flwr.server.strategy.fedopt.FedOpt.__init__:19 +#: flwr.server.strategy.fedprox.FedProx.__init__:55 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:17 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:19 +#: flwr.server.strategy.krum.Krum.__init__:22 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:23 of +msgid "Function used to configure validation. Defaults to None." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:23 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:25 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:25 +#: flwr.server.strategy.fedadam.FedAdam.__init__:21 +#: flwr.server.strategy.fedavg.FedAvg.__init__:25 +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:21 +#: flwr.server.strategy.fedopt.FedOpt.__init__:21 +#: flwr.server.strategy.fedprox.FedProx.__init__:57 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:19 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:21 +#: flwr.server.strategy.krum.Krum.__init__:24 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:25 of +msgid "Whether or not accept rounds containing failures. Defaults to True." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:25 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:27 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:27 +#: flwr.server.strategy.fedadam.FedAdam.__init__:23 +#: flwr.server.strategy.fedavg.FedAvg.__init__:27 +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:23 +#: flwr.server.strategy.fedopt.FedOpt.__init__:23 +#: flwr.server.strategy.fedprox.FedProx.__init__:59 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:21 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:23 +#: flwr.server.strategy.krum.Krum.__init__:26 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:27 of +msgid "Initial global model parameters." 
+msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:29 +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.__init__:31 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:21 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:23 +#: flwr.server.strategy.fedadam.FedAdam.__init__:25 +#: flwr.server.strategy.fedadam.FedAdam.__init__:27 +#: flwr.server.strategy.fedavg.FedAvg.__init__:29 +#: flwr.server.strategy.fedavg.FedAvg.__init__:31 +#: flwr.server.strategy.fedopt.FedOpt.__init__:25 +#: flwr.server.strategy.fedopt.FedOpt.__init__:27 +#: flwr.server.strategy.fedprox.FedProx.__init__:61 +#: flwr.server.strategy.fedprox.FedProx.__init__:63 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:25 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:27 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:29 +#: flwr.server.strategy.qfedavg.QFedAvg.__init__:31 of +msgid "Metrics aggregation function, optional." +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_evaluate:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_evaluate:1 of +msgid "Aggregate evaluation losses using weighted average." +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedadagrad.FedAdagrad.aggregate_fit:1 +#: flwr.server.strategy.fedadam.FedAdam.aggregate_fit:1 +#: flwr.server.strategy.fedavg.FedAvg.aggregate_fit:1 +#: flwr.server.strategy.fedavgm.FedAvgM.aggregate_fit:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.aggregate_fit:1 +#: flwr.server.strategy.fedyogi.FedYogi.aggregate_fit:1 +#: flwr.server.strategy.qfedavg.QFedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using weighted average." 
+msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.evaluate:1 +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.initialize_parameters:1 +#: flwr.server.strategy.fedavgm.FedAvgM.initialize_parameters:1 of +msgid "Initialize global model parameters." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.num_evaluation_clients:1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_evaluation_clients:1 of +msgid "Use a fraction of available clients for evaluation." +msgstr "" + +#: flwr.server.strategy.fedavg.FedAvg.num_fit_clients:1 +#: flwr.server.strategy.qfedavg.QFedAvg.num_fit_clients:1 of +msgid "Return the sample size and the required number of available clients." +msgstr "" + +#: ../../source/ref-api-flwr.rst:101 +msgid "server.strategy.FedAvgM" +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM:1 +#: flwr.server.strategy.fedmedian.FedMedian:1 of +msgid "Configurable FedAvg with Momentum strategy implementation." +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:1 of +msgid "Federated Averaging with Momentum strategy." +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:3 of +msgid "Implementation based on https://arxiv.org/pdf/1909.06335.pdf" +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:5 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:3 +#: flwr.server.strategy.krum.Krum.__init__:3 of +msgid "Fraction of clients used during training. Defaults to 0.1." +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:7 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:5 +#: flwr.server.strategy.krum.Krum.__init__:5 of +msgid "Fraction of clients used during validation. Defaults to 0.1." +msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:25 of +msgid "" +"Server-side learning rate used in server-side optimization. Defaults to " +"1.0." 
+msgstr "" + +#: flwr.server.strategy.fedavgm.FedAvgM.__init__:28 of +msgid "Server-side momentum factor used for FedAvgM. Defaults to 0.0." +msgstr "" + +#: ../../source/ref-api-flwr.rst:112 +msgid "server.strategy.FedMedian" +msgstr "" + +#: flwr.server.strategy.fedmedian.FedMedian.aggregate_fit:1 of +msgid "Aggregate fit results using median." +msgstr "" + +#: ../../source/ref-api-flwr.rst:122 +msgid "server.strategy.QFedAvg" +msgstr "" + +#: flwr.server.strategy.qfedavg.QFedAvg:1 of +msgid "Configurable QFedAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api-flwr.rst:133 +msgid "server.strategy.FaultTolerantFedAvg" +msgstr "" + +#: flwr.server.strategy.fault_tolerant_fedavg.FaultTolerantFedAvg:1 of +msgid "Configurable fault-tolerant FedAvg strategy implementation." +msgstr "" + +#: ../../source/ref-api-flwr.rst:144 +msgid "server.strategy.FedOpt" +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt:1 of +msgid "Configurable FedAdagrad strategy implementation." +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt.__init__:1 of +msgid "Federated Optim strategy interface." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:3 +#: flwr.server.strategy.fedadam.FedAdam.__init__:3 +#: flwr.server.strategy.fedopt.FedOpt.__init__:3 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:3 of +msgid "Implementation based on https://arxiv.org/abs/2003.00295v5" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:5 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:5 +#: flwr.server.strategy.fedadam.FedAdam.__init__:5 +#: flwr.server.strategy.fedopt.FedOpt.__init__:5 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:5 of +msgid "Fraction of clients used during training. Defaults to 1.0." 
+msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:7 +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:7 +#: flwr.server.strategy.fedadam.FedAdam.__init__:7 +#: flwr.server.strategy.fedopt.FedOpt.__init__:7 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:7 of +msgid "Fraction of clients used during validation. Defaults to 1.0." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:29 +#: flwr.server.strategy.fedadam.FedAdam.__init__:29 +#: flwr.server.strategy.fedopt.FedOpt.__init__:29 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:29 of +msgid "Server-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:31 +#: flwr.server.strategy.fedadam.FedAdam.__init__:31 +#: flwr.server.strategy.fedopt.FedOpt.__init__:31 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:31 of +msgid "Client-side learning rate. Defaults to 1e-1." +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt.__init__:33 of +msgid "Momentum parameter. Defaults to 0.0." +msgstr "" + +#: flwr.server.strategy.fedopt.FedOpt.__init__:35 of +msgid "Second moment parameter. Defaults to 0.0." +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:33 +#: flwr.server.strategy.fedadam.FedAdam.__init__:37 +#: flwr.server.strategy.fedopt.FedOpt.__init__:37 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:37 of +msgid "Controls the algorithm's degree of adaptability. Defaults to 1e-9." +msgstr "" + +#: ../../source/ref-api-flwr.rst:155 +msgid "server.strategy.FedProx" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx:1 of +msgid "Configurable FedProx strategy implementation." +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.__init__:1 of +msgid "Federated Optimization strategy." 
+msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.__init__:3 of +msgid "Implementation based on https://arxiv.org/abs/1812.06127" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.__init__:5 of +msgid "" +"The strategy in itself will not be different than FedAvg, the client " +"needs to be adjusted. A proximal term needs to be added to the loss " +"function during the training:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.__init__:9 of +msgid "" +"\\\\frac{\\\\mu}{2} || w - w^t ||^2\n" +"\n" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.__init__:12 of +msgid "" +"Where $w^t$ are the global parameters and $w$ are the local weights the " +"function will be optimized with." +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.__init__:15 of +msgid "In PyTorch, for example, the loss would go from:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.__init__:21 of +msgid "To:" +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.__init__:30 of +msgid "" +"With `global_params` being a copy of the parameters before the training " +"takes place." +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.__init__:65 of +msgid "" +"The weight of the proximal term used in the optimization. 0.0 makes this " +"strategy equivalent to FedAvg, and the higher the coefficient, the more " +"regularization will be used (that is, the client parameters will need to " +"be closer to the server parameters during training)." +msgstr "" + +#: flwr.server.strategy.fedprox.FedProx.configure_fit:3 of +msgid "Sends the proximal factor mu to the clients" +msgstr "" + +#: ../../source/ref-api-flwr.rst:166 +msgid "server.strategy.FedAdagrad" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:1 of +msgid "FedAdagrad strategy - Adaptive Federated Optimization using Adagrad." 
+msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad:3 +#: flwr.server.strategy.fedadam.FedAdam:3 +#: flwr.server.strategy.fedyogi.FedYogi:5 of +msgid "Paper: https://arxiv.org/abs/2003.00295" +msgstr "" + +#: flwr.server.strategy.fedadagrad.FedAdagrad.__init__:1 +#: flwr.server.strategy.fedadam.FedAdam.__init__:1 of +msgid "Federated learning strategy using Adagrad on server-side." +msgstr "" + +#: ../../source/ref-api-flwr.rst:177 +msgid "server.strategy.FedAdam" +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam:1 of +msgid "FedAdam - Adaptive Federated Optimization using Adam." +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam.__init__:33 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:33 of +msgid "Momentum parameter. Defaults to 0.9." +msgstr "" + +#: flwr.server.strategy.fedadam.FedAdam.__init__:35 +#: flwr.server.strategy.fedyogi.FedYogi.__init__:35 of +msgid "Second moment parameter. Defaults to 0.99." +msgstr "" + +#: ../../source/ref-api-flwr.rst:188 +msgid "server.strategy.FedYogi" +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:1 of +msgid "FedYogi [Reddi et al., 2020] strategy." +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi:3 of +msgid "Adaptive Federated Optimization using Yogi." +msgstr "" + +#: flwr.server.strategy.fedyogi.FedYogi.__init__:1 of +msgid "Federated learning strategy using Yogi on server-side." +msgstr "" + +#: ../../source/ref-api-flwr.rst:199 +msgid "server.strategy.FedTrimmedAvg" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:1 +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:1 of +msgid "Federated Averaging with Trimmed Mean [Dong Yin, et al., 2021]." +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg:3 of +msgid "Paper: https://arxiv.org/abs/1803.01498" +msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.__init__:23 of +msgid "Fraction to cut off of both tails of the distribution. Defaults to 0.2." 
+msgstr "" + +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg.aggregate_fit:1 of +msgid "Aggregate fit results using trimmed average." +msgstr "" + +#: ../../source/ref-api-flwr.rst:210 +msgid "server.strategy.Krum" +msgstr "" + +#: flwr.server.strategy.krum.Krum:1 of +msgid "Configurable Krum strategy implementation." +msgstr "" + +#: flwr.server.strategy.krum.Krum.__init__:1 of +msgid "Krum strategy." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:15 +#: flwr.server.strategy.krum.Krum.__init__:13 of +msgid "Number of malicious clients in the system. Defaults to 0." +msgstr "" + +#: flwr.server.strategy.krum.Krum.__init__:15 of +msgid "" +"Number of clients to keep before averaging (MultiKrum). Defaults to 0, in" +" that case classical Krum is applied." +msgstr "" + +#: flwr.server.strategy.krum.Krum.aggregate_fit:1 of +msgid "Aggregate fit results using Krum." +msgstr "" + +#: ../../source/ref-api-flwr.rst:220 +msgid "server.strategy.Bulyan" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan:1 of +msgid "Bulyan strategy implementation." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:1 of +msgid "Bulyan strategy." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:3 of +msgid "Implementation based on https://arxiv.org/abs/1802.07927." +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:27 of +msgid "" +"Byzantine resilient aggregation rule that is used as the first step of " +"the Bulyan (e.g., Krum)" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.__init__:29 of +msgid "arguments to the first_aggregation rule" +msgstr "" + +#: flwr.server.strategy.bulyan.Bulyan.aggregate_fit:1 of +msgid "Aggregate fit results using Bulyan." +msgstr "" + +#: ../../source/ref-api-flwr.rst:231 +msgid "server.strategy.FedXgbNnAvg" +msgstr "" + +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg:1 of +msgid "Configurable FedXgbNnAvg strategy implementation." 
+msgstr "" + +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.__init__:1 of +msgid "Federated XGBoost [Ma et al., 2023] strategy." +msgstr "" + +#: flwr.server.strategy.fedxgb_nn_avg.FedXgbNnAvg.__init__:3 of +msgid "Implementation based on https://arxiv.org/abs/2304.07537." +msgstr "" + +#: ../../source/ref-api-flwr.rst:242 +msgid "server.strategy.DPFedAvgAdaptive" +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive:1 of +msgid "Wrapper for configuring a Strategy for DP with Adaptive Clipping." +msgstr "" + +#: flwr.server.strategy.dpfedavg_adaptive.DPFedAvgAdaptive.aggregate_fit:1 of +msgid "Aggregate training results as in DPFedAvgFixed and update clip norms." +msgstr "" + +#: ../../source/ref-api-flwr.rst:253 +msgid "server.strategy.DPFedAvgFixed" +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed:1 of +msgid "Wrapper for configuring a Strategy for DP with Fixed Clipping." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_evaluate:1 of +msgid "Aggregate evaluation losses using the given strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.aggregate_fit:1 of +msgid "Aggregate training results using unweighted aggregation." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate:1 of +msgid "Configure the next round of evaluation using the specified strategy." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:1 of +msgid "" +"Configure the next round of training incorporating Differential Privacy " +"(DP)." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit:3 of +msgid "" +"Configuration of the next training round includes information related to " +"DP, such as clip norm and noise stddev." +msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.evaluate:1 of +msgid "Evaluate model parameters using an evaluation function from the strategy." 
+msgstr "" + +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.initialize_parameters:1 of +msgid "Initialize global model parameters using given strategy." +msgstr "" + +#: ../../source/ref-api-flwr.rst:261 +msgid "common" +msgstr "" + +#: flwr.common:1 of +msgid "Common components shared between server and client." +msgstr "" + +#: flwr.common.typing.ClientMessage:1 of +msgid "ClientMessage is a container used to hold one result message." +msgstr "" + +#: flwr.common.typing.Code:1 of +msgid "Client status codes." +msgstr "" + +#: flwr.common.typing.DisconnectRes:1 of +msgid "DisconnectRes message from client to server." +msgstr "" + +#: flwr.common.typing.EvaluateIns:1 of +msgid "Evaluate instructions for a client." +msgstr "" + +#: flwr.common.typing.EvaluateRes:1 of +msgid "Evaluate response from a client." +msgstr "" + +#: flwr.common.telemetry.EventType:1 of +msgid "Types of telemetry events." +msgstr "" + +#: flwr.common.typing.FitIns:1 of +msgid "Fit instructions for a client." +msgstr "" + +#: flwr.common.typing.FitRes:1 of +msgid "Fit response from a client." +msgstr "" + +#: flwr.common.typing.GetParametersIns:1 of +msgid "Parameters request for a client." +msgstr "" + +#: flwr.common.typing.GetParametersRes:1 of +msgid "Response when asked to return parameters." +msgstr "" + +#: flwr.common.typing.GetPropertiesIns:1 of +msgid "Properties request for a client." +msgstr "" + +#: flwr.common.typing.GetPropertiesRes:1 of +msgid "Properties response from a client." +msgstr "" + +#: flwr.common.typing.Parameters:1 of +msgid "Model parameters." +msgstr "" + +#: flwr.common.typing.ReconnectIns:1 of +msgid "ReconnectIns message from server to client." +msgstr "" + +#: flwr.common.typing.ServerMessage:1 of +msgid "ServerMessage is a container used to hold one instruction message." +msgstr "" + +#: flwr.common.typing.Status:1 of +msgid "Client status." +msgstr "" + +#: flwr.common.parameter.bytes_to_ndarray:1 of +msgid "Deserialize NumPy ndarray from bytes." 
+msgstr "" + +#: flwr.common.logger.configure:1 of +msgid "Configure logging to file and/or remote log server." +msgstr "" + +#: logging.Logger.log:1 of +msgid "Log 'msg % args' with the integer severity 'level'." +msgstr "" + +#: logging.Logger.log:3 of +msgid "" +"To pass exception information, use the keyword argument exc_info with a " +"true value, e.g." +msgstr "" + +#: logging.Logger.log:6 of +#, python-format +msgid "logger.log(level, \"We have a %s\", \"mysterious problem\", exc_info=1)" +msgstr "" + +#: flwr.common.parameter.ndarray_to_bytes:1 of +msgid "Serialize NumPy ndarray to bytes." +msgstr "" + +#: flwr.common.parameter.ndarrays_to_parameters:1 of +msgid "Convert NumPy ndarrays to parameters object." +msgstr "" + +#: flwr.common.date.now:1 of +msgid "Construct a datetime from time.time() with time zone set to UTC." +msgstr "" + +#: flwr.common.parameter.parameters_to_ndarrays:1 of +msgid "Convert parameters object to NumPy ndarrays." +msgstr "" + +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "" + +#: ../../source/ref-changelog.md:3 +msgid "Unreleased" +msgstr "" + +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:83 +#: ../../source/ref-changelog.md:167 ../../source/ref-changelog.md:231 +#: ../../source/ref-changelog.md:289 ../../source/ref-changelog.md:358 +#: ../../source/ref-changelog.md:487 ../../source/ref-changelog.md:529 +#: ../../source/ref-changelog.md:596 ../../source/ref-changelog.md:662 +#: ../../source/ref-changelog.md:707 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:779 ../../source/ref-changelog.md:829 +msgid "What's new?" 
+msgstr ""
+
+#: ../../source/ref-changelog.md:7
+msgid ""
+"**Add experimental support for Python 3.12** "
+"([#2565](https://github.com/adap/flower/pull/2565))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:9
+msgid ""
+"**Support custom** `ClientManager` **in** `start_driver()` "
+"([#2292](https://github.com/adap/flower/pull/2292))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:11
+msgid ""
+"**Update REST API to support create and delete nodes** "
+"([#2283](https://github.com/adap/flower/pull/2283))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:13
+msgid ""
+"**Update the C++ SDK** "
+"([#2537](https://github.com/adap/flower/pull/2537), "
+"[#2528](https://github.com/adap/flower/pull/2528), "
+"[#2523](https://github.com/adap/flower/pull/2523), "
+"[#2522](https://github.com/adap/flower/pull/2522))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:15
+msgid "Add gRPC request-response capability to the C++ SDK."
+msgstr ""
+
+#: ../../source/ref-changelog.md:17
+msgid ""
+"**Fix the incorrect return types of Strategy** "
+"([#2432](https://github.com/adap/flower/pull/2432/files))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:19
+msgid ""
+"The types of the return values in the docstrings in two methods "
+"(`aggregate_fit` and `aggregate_evaluate`) now match the hint types in "
+"the code."
+msgstr ""
+
+#: ../../source/ref-changelog.md:21
+msgid ""
+"**Unify client API** ([#2303](https://github.com/adap/flower/pull/2303), "
+"[#2390](https://github.com/adap/flower/pull/2390), "
+"[#2493](https://github.com/adap/flower/pull/2493))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:23
+msgid ""
+"Using the `client_fn`, Flower clients can interchangeably run as "
+"standalone processes (i.e. via `start_client`) or in simulation (i.e. via"
+" `start_simulation`) without requiring changes to how the client class is"
+" defined and instantiated. Calling `start_numpy_client` is now "
+"deprecated."
+msgstr "" + +#: ../../source/ref-changelog.md:25 +msgid "**Update Flower Baselines**" +msgstr "" + +#: ../../source/ref-changelog.md:27 +msgid "" +"FedProx ([#2210](https://github.com/adap/flower/pull/2210), " +"[#2286](https://github.com/adap/flower/pull/2286), " +"[#2509](https://github.com/adap/flower/pull/2509))" +msgstr "" + +#: ../../source/ref-changelog.md:29 +msgid "" +"Baselines Docs ([#2290](https://github.com/adap/flower/pull/2290), " +"[#2400](https://github.com/adap/flower/pull/2400))" +msgstr "" + +#: ../../source/ref-changelog.md:31 +msgid "" +"FedMLB ([#2340](https://github.com/adap/flower/pull/2340), " +"[#2507](https://github.com/adap/flower/pull/2507))" +msgstr "" + +#: ../../source/ref-changelog.md:33 +msgid "" +"TAMUNA ([#2254](https://github.com/adap/flower/pull/2254), " +"[#2508](https://github.com/adap/flower/pull/2508))" +msgstr "" + +#: ../../source/ref-changelog.md:35 +msgid "FedMeta [#2438](https://github.com/adap/flower/pull/2438)" +msgstr "" + +#: ../../source/ref-changelog.md:37 +msgid "FjORD [#2431](https://github.com/adap/flower/pull/2431)" +msgstr "" + +#: ../../source/ref-changelog.md:39 +msgid "MOON [#2421](https://github.com/adap/flower/pull/2421)" +msgstr "" + +#: ../../source/ref-changelog.md:41 +msgid "DepthFL [#2295](https://github.com/adap/flower/pull/2295)" +msgstr "" + +#: ../../source/ref-changelog.md:43 +msgid "FedPer [#2266](https://github.com/adap/flower/pull/2266)" +msgstr "" + +#: ../../source/ref-changelog.md:45 +msgid "FedWav2vec [#2551](https://github.com/adap/flower/pull/2551)" +msgstr "" + +#: ../../source/ref-changelog.md:47 +msgid "niid-Bench [#2428](https://github.com/adap/flower/pull/2428)" +msgstr "" + +#: ../../source/ref-changelog.md:49 +msgid "" +"FedBN ([#2608](https://github.com/adap/flower/pull/2608), " +"[#2615](https://github.com/adap/flower/pull/2615))" +msgstr "" + +#: ../../source/ref-changelog.md:51 +msgid "" +"**Update Flower Examples** " 
+"([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425),"
+" [#2526](https://github.com/adap/flower/pull/2526))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:53
+msgid ""
+"**General updates to baselines** "
+"([#2301](https://github.com/adap/flower/pull/2301), "
+"[#2305](https://github.com/adap/flower/pull/2305), "
+"[#2307](https://github.com/adap/flower/pull/2307), "
+"[#2327](https://github.com/adap/flower/pull/2327), "
+"[#2435](https://github.com/adap/flower/pull/2435))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:55
+msgid ""
+"**General updates to the simulation engine** "
+"([#2331](https://github.com/adap/flower/pull/2331), "
+"[#2447](https://github.com/adap/flower/pull/2447), "
+"[#2448](https://github.com/adap/flower/pull/2448))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:57
+msgid ""
+"**General improvements** "
+"([#2309](https://github.com/adap/flower/pull/2309), "
+"[#2310](https://github.com/adap/flower/pull/2310), "
+"[#2313](https://github.com/adap/flower/pull/2313), "
+"[#2316](https://github.com/adap/flower/pull/2316), "
+"[#2317](https://github.com/adap/flower/pull/2317),[#2349](https://github.com/adap/flower/pull/2349),"
+" [#2360](https://github.com/adap/flower/pull/2360), "
+"[#2402](https://github.com/adap/flower/pull/2402), "
+"[#2446](https://github.com/adap/flower/pull/2446), "
+"[#2561](https://github.com/adap/flower/pull/2561))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:59 ../../source/ref-changelog.md:153
+#: ../../source/ref-changelog.md:217 ../../source/ref-changelog.md:271
+#: ../../source/ref-changelog.md:338
+msgid "Flower received many improvements under the hood, too many to list here."
+msgstr "" + +#: ../../source/ref-changelog.md:61 +msgid "" +"**Add new** `Bulyan` **strategy** " +"([#1817](https://github.com/adap/flower/pull/1817), " +"[#1891](https://github.com/adap/flower/pull/1891))" +msgstr "" + +#: ../../source/ref-changelog.md:63 +msgid "" +"The new `Bulyan` strategy implements Bulyan by [El Mhamdi et al., " +"2018](https://arxiv.org/abs/1802.07927)" +msgstr "" + +#: ../../source/ref-changelog.md:65 ../../source/ref-changelog.md:155 +#: ../../source/ref-changelog.md:219 ../../source/ref-changelog.md:277 +#: ../../source/ref-changelog.md:346 ../../source/ref-changelog.md:408 +#: ../../source/ref-changelog.md:427 ../../source/ref-changelog.md:583 +#: ../../source/ref-changelog.md:654 ../../source/ref-changelog.md:691 +#: ../../source/ref-changelog.md:734 +msgid "Incompatible changes" +msgstr "" + +#: ../../source/ref-changelog.md:67 +msgid "" +"**Remove support for Python 3.7** " +"([#2280](https://github.com/adap/flower/pull/2280), " +"[#2299](https://github.com/adap/flower/pull/2299), " +"[#2304](https://github.com/adap/flower/pull/2304), " +"[#2306](https://github.com/adap/flower/pull/2306), " +"[#2355](https://github.com/adap/flower/pull/2355), " +"[#2356](https://github.com/adap/flower/pull/2356))" +msgstr "" + +#: ../../source/ref-changelog.md:69 +msgid "" +"Python 3.7 support was deprecated in Flower 1.5, and this release removes" +" support. Flower now requires Python 3.8." +msgstr "" + +#: ../../source/ref-changelog.md:71 +msgid "" +"**Remove experimental argument** `rest` **from** `start_client` " +"([#2324](https://github.com/adap/flower/pull/2324))" +msgstr "" + +#: ../../source/ref-changelog.md:73 +msgid "" +"The (still experimental) argument `rest` was removed from `start_client` " +"and `start_numpy_client`. Use `transport=\"rest\"` to opt into the " +"experimental REST API instead." 
+msgstr "" + +#: ../../source/ref-changelog.md:75 +msgid "v1.5.0 (2023-08-31)" +msgstr "" + +#: ../../source/ref-changelog.md:77 ../../source/ref-changelog.md:161 +#: ../../source/ref-changelog.md:225 ../../source/ref-changelog.md:283 +#: ../../source/ref-changelog.md:352 ../../source/ref-changelog.md:421 +msgid "Thanks to our contributors" +msgstr "" + +#: ../../source/ref-changelog.md:79 ../../source/ref-changelog.md:163 +#: ../../source/ref-changelog.md:227 ../../source/ref-changelog.md:285 +msgid "" +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" +msgstr "" + +#: ../../source/ref-changelog.md:81 +msgid "" +"`Adam Narozniak`, `Anass Anhari`, `Charles Beauville`, `Dana-Farber`, " +"`Daniel J. Beutel`, `Daniel Nata Nugraha`, `Edoardo Gabrielli`, `Gustavo " +"Bertoli`, `Heng Pan`, `Javier`, `Mahdi`, `Steven Hé (Sīchàng)`, `Taner " +"Topal`, `achiverram28`, `danielnugraha`, `eunchung`, `ruthgal` " +msgstr "" + +#: ../../source/ref-changelog.md:85 +msgid "" +"**Introduce new simulation engine** " +"([#1969](https://github.com/adap/flower/pull/1969), " +"[#2221](https://github.com/adap/flower/pull/2221), " +"[#2248](https://github.com/adap/flower/pull/2248))" +msgstr "" + +#: ../../source/ref-changelog.md:87 +msgid "" +"The new simulation engine has been rewritten from the ground up, yet it " +"remains fully backwards compatible. It offers much improved stability and" +" memory handling, especially when working with GPUs. Simulations " +"transparently adapt to different settings to scale simulation in CPU-" +"only, CPU+GPU, multi-GPU, or multi-node multi-GPU environments." 
+msgstr "" + +#: ../../source/ref-changelog.md:89 +msgid "" +"Comprehensive documentation includes a new [how-to run " +"simulations](https://flower.dev/docs/framework/how-to-run-" +"simulations.html) guide, new [simulation-" +"pytorch](https://flower.dev/docs/examples/simulation-pytorch.html) and " +"[simulation-tensorflow](https://flower.dev/docs/examples/simulation-" +"tensorflow.html) notebooks, and a new [YouTube tutorial " +"series](https://www.youtube.com/watch?v=cRebUIGB5RU&list=PLNG4feLHqCWlnj8a_E1A_n5zr2-8pafTB)." +msgstr "" + +#: ../../source/ref-changelog.md:91 +msgid "" +"**Restructure Flower Docs** " +"([#1824](https://github.com/adap/flower/pull/1824), " +"[#1865](https://github.com/adap/flower/pull/1865), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1887](https://github.com/adap/flower/pull/1887), " +"[#1919](https://github.com/adap/flower/pull/1919), " +"[#1922](https://github.com/adap/flower/pull/1922), " +"[#1920](https://github.com/adap/flower/pull/1920), " +"[#1923](https://github.com/adap/flower/pull/1923), " +"[#1924](https://github.com/adap/flower/pull/1924), " +"[#1962](https://github.com/adap/flower/pull/1962), " +"[#2006](https://github.com/adap/flower/pull/2006), " +"[#2133](https://github.com/adap/flower/pull/2133), " +"[#2203](https://github.com/adap/flower/pull/2203), " +"[#2215](https://github.com/adap/flower/pull/2215), " +"[#2122](https://github.com/adap/flower/pull/2122), " +"[#2223](https://github.com/adap/flower/pull/2223), " +"[#2219](https://github.com/adap/flower/pull/2219), " +"[#2232](https://github.com/adap/flower/pull/2232), " +"[#2233](https://github.com/adap/flower/pull/2233), " +"[#2234](https://github.com/adap/flower/pull/2234), " +"[#2235](https://github.com/adap/flower/pull/2235), " +"[#2237](https://github.com/adap/flower/pull/2237), " +"[#2238](https://github.com/adap/flower/pull/2238), " +"[#2242](https://github.com/adap/flower/pull/2242), " +"[#2231](https://github.com/adap/flower/pull/2231), " 
+"[#2243](https://github.com/adap/flower/pull/2243), " +"[#2227](https://github.com/adap/flower/pull/2227))" +msgstr "" + +#: ../../source/ref-changelog.md:93 +msgid "" +"Much effort went into a completely restructured Flower docs experience. " +"The documentation on [flower.dev/docs](flower.dev/docs) is now divided " +"into Flower Framework, Flower Baselines, Flower Android SDK, Flower iOS " +"SDK, and code example projects." +msgstr "" + +#: ../../source/ref-changelog.md:95 +msgid "" +"**Introduce Flower Swift SDK** " +"([#1858](https://github.com/adap/flower/pull/1858), " +"[#1897](https://github.com/adap/flower/pull/1897))" +msgstr "" + +#: ../../source/ref-changelog.md:97 +msgid "" +"This is the first preview release of the Flower Swift SDK. Flower support" +" on iOS is improving, and alongside the Swift SDK and code example, there" +" is now also an iOS quickstart tutorial." +msgstr "" + +#: ../../source/ref-changelog.md:99 +msgid "" +"**Introduce Flower Android SDK** " +"([#2131](https://github.com/adap/flower/pull/2131))" +msgstr "" + +#: ../../source/ref-changelog.md:101 +msgid "" +"This is the first preview release of the Flower Kotlin SDK. Flower " +"support on Android is improving, and alongside the Kotlin SDK and code " +"example, there is now also an Android quickstart tutorial." 
+msgstr "" + +#: ../../source/ref-changelog.md:103 +msgid "" +"**Introduce new end-to-end testing infrastructure** " +"([#1842](https://github.com/adap/flower/pull/1842), " +"[#2071](https://github.com/adap/flower/pull/2071), " +"[#2072](https://github.com/adap/flower/pull/2072), " +"[#2068](https://github.com/adap/flower/pull/2068), " +"[#2067](https://github.com/adap/flower/pull/2067), " +"[#2069](https://github.com/adap/flower/pull/2069), " +"[#2073](https://github.com/adap/flower/pull/2073), " +"[#2070](https://github.com/adap/flower/pull/2070), " +"[#2074](https://github.com/adap/flower/pull/2074), " +"[#2082](https://github.com/adap/flower/pull/2082), " +"[#2084](https://github.com/adap/flower/pull/2084), " +"[#2093](https://github.com/adap/flower/pull/2093), " +"[#2109](https://github.com/adap/flower/pull/2109), " +"[#2095](https://github.com/adap/flower/pull/2095), " +"[#2140](https://github.com/adap/flower/pull/2140), " +"[#2137](https://github.com/adap/flower/pull/2137), " +"[#2165](https://github.com/adap/flower/pull/2165))" +msgstr "" + +#: ../../source/ref-changelog.md:105 +msgid "" +"A new testing infrastructure ensures that new changes stay compatible " +"with existing framework integrations or strategies." +msgstr "" + +#: ../../source/ref-changelog.md:107 +msgid "**Deprecate Python 3.7**" +msgstr "" + +#: ../../source/ref-changelog.md:109 +msgid "" +"Since Python 3.7 reached its end of life (EOL) on 2023-06-27, support for" +" Python 3.7 is now deprecated and will be removed in an upcoming release." +msgstr "" + +#: ../../source/ref-changelog.md:111 +msgid "" +"**Add new** `FedTrimmedAvg` **strategy** " +"([#1769](https://github.com/adap/flower/pull/1769), " +"[#1853](https://github.com/adap/flower/pull/1853))" +msgstr "" + +#: ../../source/ref-changelog.md:113 +msgid "" +"The new `FedTrimmedAvg` strategy implements Trimmed Mean by [Dong Yin, " +"2018](https://arxiv.org/abs/1803.01498)." 
+msgstr "" + +#: ../../source/ref-changelog.md:115 +msgid "" +"**Introduce start_driver** " +"([#1697](https://github.com/adap/flower/pull/1697))" +msgstr "" + +#: ../../source/ref-changelog.md:117 +msgid "" +"In addition to `start_server` and using the raw Driver API, there is a " +"new `start_driver` function that allows for running `start_server` " +"scripts as a Flower driver with only a single-line code change. Check out" +" the `mt-pytorch` code example to see a working example using " +"`start_driver`." +msgstr "" + +#: ../../source/ref-changelog.md:119 +msgid "" +"**Add parameter aggregation to** `mt-pytorch` **code example** " +"([#1785](https://github.com/adap/flower/pull/1785))" +msgstr "" + +#: ../../source/ref-changelog.md:121 +msgid "" +"The `mt-pytorch` example shows how to aggregate parameters when writing a" +" driver script. The included `driver.py` and `server.py` have been " +"aligned to demonstrate both the low-level way and the high-level way of " +"building server-side logic." +msgstr "" + +#: ../../source/ref-changelog.md:123 +msgid "" +"**Migrate experimental REST API to Starlette** " +"([2171](https://github.com/adap/flower/pull/2171))" +msgstr "" + +#: ../../source/ref-changelog.md:125 +msgid "" +"The (experimental) REST API used to be implemented in " +"[FastAPI](https://fastapi.tiangolo.com/), but it has now been migrated to" +" use [Starlette](https://www.starlette.io/) directly." +msgstr "" + +#: ../../source/ref-changelog.md:127 +msgid "" +"Please note: The REST request-response API is still experimental and will" +" likely change significantly over time." 
+msgstr "" + +#: ../../source/ref-changelog.md:129 +msgid "" +"**Introduce experimental gRPC request-response API** " +"([#1867](https://github.com/adap/flower/pull/1867), " +"[#1901](https://github.com/adap/flower/pull/1901))" +msgstr "" + +#: ../../source/ref-changelog.md:131 +msgid "" +"In addition to the existing gRPC API (based on bidirectional streaming) " +"and the experimental REST API, there is now a new gRPC API that uses a " +"request-response model to communicate with client nodes." +msgstr "" + +#: ../../source/ref-changelog.md:133 +msgid "" +"Please note: The gRPC request-response API is still experimental and will" +" likely change significantly over time." +msgstr "" + +#: ../../source/ref-changelog.md:135 +msgid "" +"**Replace the experimental** `start_client(rest=True)` **with the new** " +"`start_client(transport=\"rest\")` " +"([#1880](https://github.com/adap/flower/pull/1880))" +msgstr "" + +#: ../../source/ref-changelog.md:137 +msgid "" +"The (experimental) `start_client` argument `rest` was deprecated in " +"favour of a new argument `transport`. `start_client(transport=\"rest\")` " +"will yield the same behaviour as `start_client(rest=True)` did before. " +"All code should migrate to the new argument `transport`. The deprecated " +"argument `rest` will be removed in a future release." +msgstr "" + +#: ../../source/ref-changelog.md:139 +msgid "" +"**Add a new gRPC option** " +"([#2197](https://github.com/adap/flower/pull/2197))" +msgstr "" + +#: ../../source/ref-changelog.md:141 +msgid "" +"We now start a gRPC server with the `grpc.keepalive_permit_without_calls`" +" option set to 0 by default. This prevents the clients from sending " +"keepalive pings when there is no outstanding stream." 
+msgstr "" + +#: ../../source/ref-changelog.md:143 +msgid "" +"**Improve example notebooks** " +"([#2005](https://github.com/adap/flower/pull/2005))" +msgstr "" + +#: ../../source/ref-changelog.md:145 +msgid "There's a new 30min Federated Learning PyTorch tutorial!" +msgstr "" + +#: ../../source/ref-changelog.md:147 +msgid "" +"**Example updates** ([#1772](https://github.com/adap/flower/pull/1772), " +"[#1873](https://github.com/adap/flower/pull/1873), " +"[#1981](https://github.com/adap/flower/pull/1981), " +"[#1988](https://github.com/adap/flower/pull/1988), " +"[#1984](https://github.com/adap/flower/pull/1984), " +"[#1982](https://github.com/adap/flower/pull/1982), " +"[#2112](https://github.com/adap/flower/pull/2112), " +"[#2144](https://github.com/adap/flower/pull/2144), " +"[#2174](https://github.com/adap/flower/pull/2174), " +"[#2225](https://github.com/adap/flower/pull/2225), " +"[#2183](https://github.com/adap/flower/pull/2183))" +msgstr "" + +#: ../../source/ref-changelog.md:149 +msgid "" +"Many examples have received significant updates, including simplified " +"advanced-tensorflow and advanced-pytorch examples, improved macOS " +"compatibility of TensorFlow examples, and code examples for simulation. A" +" major upgrade is that all code examples now have a `requirements.txt` " +"(in addition to `pyproject.toml`)." 
+msgstr "" + +#: ../../source/ref-changelog.md:151 +msgid "" +"**General improvements** " +"([#1872](https://github.com/adap/flower/pull/1872), " +"[#1866](https://github.com/adap/flower/pull/1866), " +"[#1884](https://github.com/adap/flower/pull/1884), " +"[#1837](https://github.com/adap/flower/pull/1837), " +"[#1477](https://github.com/adap/flower/pull/1477), " +"[#2171](https://github.com/adap/flower/pull/2171))" +msgstr "" + +#: ../../source/ref-changelog.md:157 ../../source/ref-changelog.md:221 +#: ../../source/ref-changelog.md:279 ../../source/ref-changelog.md:348 +#: ../../source/ref-changelog.md:410 +msgid "None" +msgstr "" + +#: ../../source/ref-changelog.md:159 +msgid "v1.4.0 (2023-04-21)" +msgstr "" + +#: ../../source/ref-changelog.md:165 +msgid "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Chenyang Ma (Danny)`, `Daniel J. Beutel`, `Edoardo`, `Gautam Jajoo`, " +"`Iacob-Alexandru-Andrei`, `JDRanpariya`, `Jean Charle Yaacoub`, `Kunal " +"Sarkhel`, `L. Jiang`, `Lennart Behme`, `Max Kapsecker`, `Michał`, `Nic " +"Lane`, `Nikolaos Episkopos`, `Ragy`, `Saurav Maheshkar`, `Semo Yang`, " +"`Steve Laskaridis`, `Steven Hé (Sīchàng)`, `Taner Topal`" +msgstr "" + +#: ../../source/ref-changelog.md:169 +msgid "" +"**Introduce support for XGBoost (**`FedXgbNnAvg` **strategy and " +"example)** ([#1694](https://github.com/adap/flower/pull/1694), " +"[#1709](https://github.com/adap/flower/pull/1709), " +"[#1715](https://github.com/adap/flower/pull/1715), " +"[#1717](https://github.com/adap/flower/pull/1717), " +"[#1763](https://github.com/adap/flower/pull/1763), " +"[#1795](https://github.com/adap/flower/pull/1795))" +msgstr "" + +#: ../../source/ref-changelog.md:171 +msgid "" +"XGBoost is a tree-based ensemble machine learning algorithm that uses " +"gradient boosting to improve model accuracy. 
We added a new `FedXgbNnAvg`"
+" "
+"[strategy](https://github.com/adap/flower/tree/main/src/py/flwr/server/strategy/fedxgb_nn_avg.py),"
+" and a [code "
+"example](https://github.com/adap/flower/tree/main/examples/quickstart_xgboost_horizontal)"
+" that demonstrates the usage of this new strategy in an XGBoost project."
+msgstr ""
+
+#: ../../source/ref-changelog.md:173
+msgid ""
+"**Introduce iOS SDK (preview)** "
+"([#1621](https://github.com/adap/flower/pull/1621), "
+"[#1764](https://github.com/adap/flower/pull/1764))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:175
+msgid ""
+"This is a major update for anyone wanting to implement Federated Learning"
+" on iOS mobile devices. We now have a swift iOS SDK present under "
+"[src/swift/flwr](https://github.com/adap/flower/tree/main/src/swift/flwr)"
+" that will facilitate greatly the app creating process. To showcase its "
+"use, the [iOS "
+"example](https://github.com/adap/flower/tree/main/examples/ios) has also "
+"been updated!"
+msgstr ""
+
+#: ../../source/ref-changelog.md:177
+msgid ""
+"**Introduce new \"What is Federated Learning?\" tutorial** "
+"([#1657](https://github.com/adap/flower/pull/1657), "
+"[#1721](https://github.com/adap/flower/pull/1721))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:179
+msgid ""
+"A new [entry-level tutorial](https://flower.dev/docs/framework/tutorial-"
+"what-is-federated-learning.html) in our documentation explains the basics"
+" of Federated Learning. It enables anyone who's unfamiliar with Federated"
+" Learning to start their journey with Flower. Forward it to anyone who's "
+"interested in Federated Learning!"
+msgstr "" + +#: ../../source/ref-changelog.md:181 +msgid "" +"**Introduce new Flower Baseline: FedProx MNIST** " +"([#1513](https://github.com/adap/flower/pull/1513), " +"[#1680](https://github.com/adap/flower/pull/1680), " +"[#1681](https://github.com/adap/flower/pull/1681), " +"[#1679](https://github.com/adap/flower/pull/1679))" +msgstr "" + +#: ../../source/ref-changelog.md:183 +msgid "" +"This new baseline replicates the MNIST+CNN task from the paper [Federated" +" Optimization in Heterogeneous Networks (Li et al., " +"2018)](https://arxiv.org/abs/1812.06127). It uses the `FedProx` strategy," +" which aims at making convergence more robust in heterogenous settings." +msgstr "" + +#: ../../source/ref-changelog.md:185 +msgid "" +"**Introduce new Flower Baseline: FedAvg FEMNIST** " +"([#1655](https://github.com/adap/flower/pull/1655))" +msgstr "" + +#: ../../source/ref-changelog.md:187 +msgid "" +"This new baseline replicates an experiment evaluating the performance of " +"the FedAvg algorithm on the FEMNIST dataset from the paper [LEAF: A " +"Benchmark for Federated Settings (Caldas et al., " +"2018)](https://arxiv.org/abs/1812.01097)." +msgstr "" + +#: ../../source/ref-changelog.md:189 +msgid "" +"**Introduce (experimental) REST API** " +"([#1594](https://github.com/adap/flower/pull/1594), " +"[#1690](https://github.com/adap/flower/pull/1690), " +"[#1695](https://github.com/adap/flower/pull/1695), " +"[#1712](https://github.com/adap/flower/pull/1712), " +"[#1802](https://github.com/adap/flower/pull/1802), " +"[#1770](https://github.com/adap/flower/pull/1770), " +"[#1733](https://github.com/adap/flower/pull/1733))" +msgstr "" + +#: ../../source/ref-changelog.md:191 +msgid "" +"A new REST API has been introduced as an alternative to the gRPC-based " +"communication stack. In this initial version, the REST API only supports " +"anonymous clients." 
+msgstr "" + +#: ../../source/ref-changelog.md:193 +msgid "" +"Please note: The REST API is still experimental and will likely change " +"significantly over time." +msgstr "" + +#: ../../source/ref-changelog.md:195 +msgid "" +"**Improve the (experimental) Driver API** " +"([#1663](https://github.com/adap/flower/pull/1663), " +"[#1666](https://github.com/adap/flower/pull/1666), " +"[#1667](https://github.com/adap/flower/pull/1667), " +"[#1664](https://github.com/adap/flower/pull/1664), " +"[#1675](https://github.com/adap/flower/pull/1675), " +"[#1676](https://github.com/adap/flower/pull/1676), " +"[#1693](https://github.com/adap/flower/pull/1693), " +"[#1662](https://github.com/adap/flower/pull/1662), " +"[#1794](https://github.com/adap/flower/pull/1794))" +msgstr "" + +#: ../../source/ref-changelog.md:197 +msgid "" +"The Driver API is still an experimental feature, but this release " +"introduces some major upgrades. One of the main improvements is the " +"introduction of an SQLite database to store server state on disk (instead" +" of in-memory). Another improvement is that tasks (instructions or " +"results) that have been delivered will now be deleted. This greatly " +"improves the memory efficiency of a long-running Flower server." +msgstr "" + +#: ../../source/ref-changelog.md:199 +msgid "" +"**Fix spilling issues related to Ray during simulations** " +"([#1698](https://github.com/adap/flower/pull/1698))" +msgstr "" + +#: ../../source/ref-changelog.md:201 +msgid "" +"While running long simulations, `ray` was sometimes spilling huge amounts" +" of data that would make the training unable to continue. This is now " +"fixed! 🎉" +msgstr "" + +#: ../../source/ref-changelog.md:203 +msgid "" +"**Add new example using** `TabNet` **and Flower** " +"([#1725](https://github.com/adap/flower/pull/1725))" +msgstr "" + +#: ../../source/ref-changelog.md:205 +msgid "" +"TabNet is a powerful and flexible framework for training machine learning" +" models on tabular data. 
We now have a federated example using Flower: " +"[https://github.com/adap/flower/tree/main/examples/tabnet](https://github.com/adap/flower/tree/main/examples/quickstart_tabnet)." +msgstr "" + +#: ../../source/ref-changelog.md:207 +msgid "" +"**Add new how-to guide for monitoring simulations** " +"([#1649](https://github.com/adap/flower/pull/1649))" +msgstr "" + +#: ../../source/ref-changelog.md:209 +msgid "" +"We now have a documentation guide to help users monitor their performance" +" during simulations." +msgstr "" + +#: ../../source/ref-changelog.md:211 +msgid "" +"**Add training metrics to** `History` **object during simulations** " +"([#1696](https://github.com/adap/flower/pull/1696))" +msgstr "" + +#: ../../source/ref-changelog.md:213 +msgid "" +"The `fit_metrics_aggregation_fn` can be used to aggregate training " +"metrics, but previous releases did not save the results in the `History` " +"object. This is now the case!" +msgstr "" + +#: ../../source/ref-changelog.md:215 +msgid "" +"**General improvements** " +"([#1659](https://github.com/adap/flower/pull/1659), " +"[#1646](https://github.com/adap/flower/pull/1646), " +"[#1647](https://github.com/adap/flower/pull/1647), " +"[#1471](https://github.com/adap/flower/pull/1471), " +"[#1648](https://github.com/adap/flower/pull/1648), " +"[#1651](https://github.com/adap/flower/pull/1651), " +"[#1652](https://github.com/adap/flower/pull/1652), " +"[#1653](https://github.com/adap/flower/pull/1653), " +"[#1659](https://github.com/adap/flower/pull/1659), " +"[#1665](https://github.com/adap/flower/pull/1665), " +"[#1670](https://github.com/adap/flower/pull/1670), " +"[#1672](https://github.com/adap/flower/pull/1672), " +"[#1677](https://github.com/adap/flower/pull/1677), " +"[#1684](https://github.com/adap/flower/pull/1684), " +"[#1683](https://github.com/adap/flower/pull/1683), " +"[#1686](https://github.com/adap/flower/pull/1686), " +"[#1682](https://github.com/adap/flower/pull/1682), " 
+"[#1685](https://github.com/adap/flower/pull/1685), " +"[#1692](https://github.com/adap/flower/pull/1692), " +"[#1705](https://github.com/adap/flower/pull/1705), " +"[#1708](https://github.com/adap/flower/pull/1708), " +"[#1711](https://github.com/adap/flower/pull/1711), " +"[#1713](https://github.com/adap/flower/pull/1713), " +"[#1714](https://github.com/adap/flower/pull/1714), " +"[#1718](https://github.com/adap/flower/pull/1718), " +"[#1716](https://github.com/adap/flower/pull/1716), " +"[#1723](https://github.com/adap/flower/pull/1723), " +"[#1735](https://github.com/adap/flower/pull/1735), " +"[#1678](https://github.com/adap/flower/pull/1678), " +"[#1750](https://github.com/adap/flower/pull/1750), " +"[#1753](https://github.com/adap/flower/pull/1753), " +"[#1736](https://github.com/adap/flower/pull/1736), " +"[#1766](https://github.com/adap/flower/pull/1766), " +"[#1760](https://github.com/adap/flower/pull/1760), " +"[#1775](https://github.com/adap/flower/pull/1775), " +"[#1776](https://github.com/adap/flower/pull/1776), " +"[#1777](https://github.com/adap/flower/pull/1777), " +"[#1779](https://github.com/adap/flower/pull/1779), " +"[#1784](https://github.com/adap/flower/pull/1784), " +"[#1773](https://github.com/adap/flower/pull/1773), " +"[#1755](https://github.com/adap/flower/pull/1755), " +"[#1789](https://github.com/adap/flower/pull/1789), " +"[#1788](https://github.com/adap/flower/pull/1788), " +"[#1798](https://github.com/adap/flower/pull/1798), " +"[#1799](https://github.com/adap/flower/pull/1799), " +"[#1739](https://github.com/adap/flower/pull/1739), " +"[#1800](https://github.com/adap/flower/pull/1800), " +"[#1804](https://github.com/adap/flower/pull/1804), " +"[#1805](https://github.com/adap/flower/pull/1805))" +msgstr "" + +#: ../../source/ref-changelog.md:223 +msgid "v1.3.0 (2023-02-06)" +msgstr "" + +#: ../../source/ref-changelog.md:229 +msgid "" +"`Adam Narozniak`, `Alexander Viala Bellander`, `Charles Beauville`, " +"`Daniel J. 
Beutel`, `JDRanpariya`, `Lennart Behme`, `Taner Topal`" +msgstr "" + +#: ../../source/ref-changelog.md:233 +msgid "" +"**Add support for** `workload_id` **and** `group_id` **in Driver API** " +"([#1595](https://github.com/adap/flower/pull/1595))" +msgstr "" + +#: ../../source/ref-changelog.md:235 +msgid "" +"The (experimental) Driver API now supports a `workload_id` that can be " +"used to identify which workload a task belongs to. It also supports a new" +" `group_id` that can be used, for example, to indicate the current " +"training round. Both the `workload_id` and `group_id` enable client nodes" +" to decide whether they want to handle a task or not." +msgstr "" + +#: ../../source/ref-changelog.md:237 +msgid "" +"**Make Driver API and Fleet API address configurable** " +"([#1637](https://github.com/adap/flower/pull/1637))" +msgstr "" + +#: ../../source/ref-changelog.md:239 +msgid "" +"The (experimental) long-running Flower server (Driver API and Fleet API) " +"can now configure the server address of both Driver API (via `--driver-" +"api-address`) and Fleet API (via `--fleet-api-address`) when starting:" +msgstr "" + +#: ../../source/ref-changelog.md:241 +msgid "" +"`flower-server --driver-api-address \"0.0.0.0:8081\" --fleet-api-address " +"\"0.0.0.0:8086\"`" +msgstr "" + +#: ../../source/ref-changelog.md:243 +msgid "Both IPv4 and IPv6 addresses are supported." +msgstr "" + +#: ../../source/ref-changelog.md:245 +msgid "" +"**Add new example of Federated Learning using fastai and Flower** " +"([#1598](https://github.com/adap/flower/pull/1598))" +msgstr "" + +#: ../../source/ref-changelog.md:247 +msgid "" +"A new code example (`quickstart_fastai`) demonstrates federated learning " +"with [fastai](https://www.fast.ai/) and Flower. You can find it here: " +"[quickstart_fastai](https://github.com/adap/flower/tree/main/examples/quickstart_fastai)." 
+msgstr "" + +#: ../../source/ref-changelog.md:249 +msgid "" +"**Make Android example compatible with** `flwr >= 1.0.0` **and the latest" +" versions of Android** " +"([#1603](https://github.com/adap/flower/pull/1603))" +msgstr "" + +#: ../../source/ref-changelog.md:251 +msgid "" +"The Android code example has received a substantial update: the project " +"is compatible with Flower 1.0 (and later), the UI received a full " +"refresh, and the project is updated to be compatible with newer Android " +"tooling." +msgstr "" + +#: ../../source/ref-changelog.md:253 +msgid "" +"**Add new `FedProx` strategy** " +"([#1619](https://github.com/adap/flower/pull/1619))" +msgstr "" + +#: ../../source/ref-changelog.md:255 +msgid "" +"This " +"[strategy](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedprox.py)" +" is almost identical to " +"[`FedAvg`](https://github.com/adap/flower/blob/main/src/py/flwr/server/strategy/fedavg.py)," +" but helps users replicate what is described in this " +"[paper](https://arxiv.org/abs/1812.06127). It essentially adds a " +"parameter called `proximal_mu` to regularize the local models with " +"respect to the global models." +msgstr "" + +#: ../../source/ref-changelog.md:257 +msgid "" +"**Add new metrics to telemetry events** " +"([#1640](https://github.com/adap/flower/pull/1640))" +msgstr "" + +#: ../../source/ref-changelog.md:259 +msgid "" +"An updated event structure allows, for example, the clustering of events " +"within the same workload." 
+msgstr "" + +#: ../../source/ref-changelog.md:261 +msgid "" +"**Add new custom strategy tutorial section** " +"[#1623](https://github.com/adap/flower/pull/1623)" +msgstr "" + +#: ../../source/ref-changelog.md:263 +msgid "" +"The Flower tutorial now has a new section that covers implementing a " +"custom strategy from scratch: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-build-a-strategy-from-scratch-pytorch.ipynb)" +msgstr "" + +#: ../../source/ref-changelog.md:265 +msgid "" +"**Add new custom serialization tutorial section** " +"([#1622](https://github.com/adap/flower/pull/1622))" +msgstr "" + +#: ../../source/ref-changelog.md:267 +msgid "" +"The Flower tutorial now has a new section that covers custom " +"serialization: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/doc/source" +"/tutorial-customize-the-client-pytorch.ipynb)" +msgstr "" + +#: ../../source/ref-changelog.md:269 +msgid "" +"**General improvements** " +"([#1638](https://github.com/adap/flower/pull/1638), " +"[#1634](https://github.com/adap/flower/pull/1634), " +"[#1636](https://github.com/adap/flower/pull/1636), " +"[#1635](https://github.com/adap/flower/pull/1635), " +"[#1633](https://github.com/adap/flower/pull/1633), " +"[#1632](https://github.com/adap/flower/pull/1632), " +"[#1631](https://github.com/adap/flower/pull/1631), " +"[#1630](https://github.com/adap/flower/pull/1630), " +"[#1627](https://github.com/adap/flower/pull/1627), " +"[#1593](https://github.com/adap/flower/pull/1593), " +"[#1616](https://github.com/adap/flower/pull/1616), " +"[#1615](https://github.com/adap/flower/pull/1615), " +"[#1607](https://github.com/adap/flower/pull/1607), " +"[#1609](https://github.com/adap/flower/pull/1609), " +"[#1608](https://github.com/adap/flower/pull/1608), " +"[#1603](https://github.com/adap/flower/pull/1603), " +"[#1590](https://github.com/adap/flower/pull/1590), " 
+"[#1580](https://github.com/adap/flower/pull/1580), " +"[#1599](https://github.com/adap/flower/pull/1599), " +"[#1600](https://github.com/adap/flower/pull/1600), " +"[#1601](https://github.com/adap/flower/pull/1601), " +"[#1597](https://github.com/adap/flower/pull/1597), " +"[#1595](https://github.com/adap/flower/pull/1595), " +"[#1591](https://github.com/adap/flower/pull/1591), " +"[#1588](https://github.com/adap/flower/pull/1588), " +"[#1589](https://github.com/adap/flower/pull/1589), " +"[#1587](https://github.com/adap/flower/pull/1587), " +"[#1573](https://github.com/adap/flower/pull/1573), " +"[#1581](https://github.com/adap/flower/pull/1581), " +"[#1578](https://github.com/adap/flower/pull/1578), " +"[#1574](https://github.com/adap/flower/pull/1574), " +"[#1572](https://github.com/adap/flower/pull/1572), " +"[#1586](https://github.com/adap/flower/pull/1586))" +msgstr "" + +#: ../../source/ref-changelog.md:273 +msgid "" +"**Updated documentation** " +"([#1629](https://github.com/adap/flower/pull/1629), " +"[#1628](https://github.com/adap/flower/pull/1628), " +"[#1620](https://github.com/adap/flower/pull/1620), " +"[#1618](https://github.com/adap/flower/pull/1618), " +"[#1617](https://github.com/adap/flower/pull/1617), " +"[#1613](https://github.com/adap/flower/pull/1613), " +"[#1614](https://github.com/adap/flower/pull/1614))" +msgstr "" + +#: ../../source/ref-changelog.md:275 ../../source/ref-changelog.md:342 +msgid "" +"As usual, the documentation has improved quite a bit. It is another step " +"in our effort to make the Flower documentation the best documentation of " +"any project. Stay tuned and as always, feel free to provide feedback!" +msgstr "" + +#: ../../source/ref-changelog.md:281 +msgid "v1.2.0 (2023-01-13)" +msgstr "" + +#: ../../source/ref-changelog.md:287 +msgid "" +"`Adam Narozniak`, `Charles Beauville`, `Daniel J. Beutel`, `Edoardo`, `L." 
+" Jiang`, `Ragy`, `Taner Topal`, `dannymcy`" +msgstr "" + +#: ../../source/ref-changelog.md:291 +msgid "" +"**Introduce new Flower Baseline: FedAvg MNIST** " +"([#1497](https://github.com/adap/flower/pull/1497), " +"[#1552](https://github.com/adap/flower/pull/1552))" +msgstr "" + +#: ../../source/ref-changelog.md:293 +msgid "" +"Over the coming weeks, we will be releasing a number of new reference " +"implementations useful especially to FL newcomers. They will typically " +"revisit well known papers from the literature, and be suitable for " +"integration in your own application or for experimentation, in order to " +"deepen your knowledge of FL in general. Today's release is the first in " +"this series. [Read more.](https://flower.dev/blog/2023-01-12-fl-starter-" +"pack-fedavg-mnist-cnn/)" +msgstr "" + +#: ../../source/ref-changelog.md:295 +msgid "" +"**Improve GPU support in simulations** " +"([#1555](https://github.com/adap/flower/pull/1555))" +msgstr "" + +#: ../../source/ref-changelog.md:297 +msgid "" +"The Ray-based Virtual Client Engine (`start_simulation`) has been updated" +" to improve GPU support. The update includes some of the hard-earned " +"lessons from scaling simulations in GPU cluster environments. New " +"defaults make running GPU-based simulations substantially more robust." +msgstr "" + +#: ../../source/ref-changelog.md:299 +msgid "" +"**Improve GPU support in Jupyter Notebook tutorials** " +"([#1527](https://github.com/adap/flower/pull/1527), " +"[#1558](https://github.com/adap/flower/pull/1558))" +msgstr "" + +#: ../../source/ref-changelog.md:301 +msgid "" +"Some users reported that Jupyter Notebooks have not always been easy to " +"use on GPU instances. We listened and made improvements to all of our " +"Jupyter notebooks! 
Check out the updated notebooks here:" +msgstr "" + +#: ../../source/ref-changelog.md:303 +msgid "" +"[An Introduction to Federated Learning](https://flower.dev/docs/framework" +"/tutorial-get-started-with-flower-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:304 +msgid "" +"[Strategies in Federated Learning](https://flower.dev/docs/framework" +"/tutorial-use-a-federated-learning-strategy-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:305 +msgid "" +"[Building a Strategy](https://flower.dev/docs/framework/tutorial-build-a" +"-strategy-from-scratch-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:306 +msgid "" +"[Client and NumPyClient](https://flower.dev/docs/framework/tutorial-" +"customize-the-client-pytorch.html)" +msgstr "" + +#: ../../source/ref-changelog.md:308 +msgid "" +"**Introduce optional telemetry** " +"([#1533](https://github.com/adap/flower/pull/1533), " +"[#1544](https://github.com/adap/flower/pull/1544), " +"[#1584](https://github.com/adap/flower/pull/1584))" +msgstr "" + +#: ../../source/ref-changelog.md:310 +msgid "" +"After a [request for " +"feedback](https://github.com/adap/flower/issues/1534) from the community," +" the Flower open-source project introduces optional collection of " +"*anonymous* usage metrics to make well-informed decisions to improve " +"Flower. Doing this enables the Flower team to understand how Flower is " +"used and what challenges users might face." +msgstr "" + +#: ../../source/ref-changelog.md:312 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users who do not want to share anonymous usage metrics. " +"[Read more.](https://flower.dev/docs/telemetry.html)." 
+msgstr "" + +#: ../../source/ref-changelog.md:314 +msgid "" +"**Introduce (experimental) Driver API** " +"([#1520](https://github.com/adap/flower/pull/1520), " +"[#1525](https://github.com/adap/flower/pull/1525), " +"[#1545](https://github.com/adap/flower/pull/1545), " +"[#1546](https://github.com/adap/flower/pull/1546), " +"[#1550](https://github.com/adap/flower/pull/1550), " +"[#1551](https://github.com/adap/flower/pull/1551), " +"[#1567](https://github.com/adap/flower/pull/1567))" +msgstr "" + +#: ../../source/ref-changelog.md:316 +msgid "" +"Flower now has a new (experimental) Driver API which will enable fully " +"programmable, async, and multi-tenant Federated Learning and Federated " +"Analytics applications. Phew, that's a lot! Going forward, the Driver API" +" will be the abstraction that many upcoming features will be built on - " +"and you can start building those things now, too." +msgstr "" + +#: ../../source/ref-changelog.md:318 +msgid "" +"The Driver API also enables a new execution mode in which the server runs" +" indefinitely. Multiple individual workloads can run concurrently and " +"start and stop their execution independent of the server. This is " +"especially useful for users who want to deploy Flower in production." +msgstr "" + +#: ../../source/ref-changelog.md:320 +msgid "" +"To learn more, check out the `mt-pytorch` code example. We look forward " +"to you feedback!" +msgstr "" + +#: ../../source/ref-changelog.md:322 +msgid "" +"Please note: *The Driver API is still experimental and will likely change" +" significantly over time.*" +msgstr "" + +#: ../../source/ref-changelog.md:324 +msgid "" +"**Add new Federated Analytics with Pandas example** " +"([#1469](https://github.com/adap/flower/pull/1469), " +"[#1535](https://github.com/adap/flower/pull/1535))" +msgstr "" + +#: ../../source/ref-changelog.md:326 +msgid "" +"A new code example (`quickstart_pandas`) demonstrates federated analytics" +" with Pandas and Flower. 
You can find it here: " +"[quickstart_pandas](https://github.com/adap/flower/tree/main/examples/quickstart_pandas)." +msgstr "" + +#: ../../source/ref-changelog.md:328 +msgid "" +"**Add new strategies: Krum and MultiKrum** " +"([#1481](https://github.com/adap/flower/pull/1481))" +msgstr "" + +#: ../../source/ref-changelog.md:330 +msgid "" +"Edoardo, a computer science student at the Sapienza University of Rome, " +"contributed a new `Krum` strategy that enables users to easily use Krum " +"and MultiKrum in their workloads." +msgstr "" + +#: ../../source/ref-changelog.md:332 +msgid "" +"**Update C++ example to be compatible with Flower v1.2.0** " +"([#1495](https://github.com/adap/flower/pull/1495))" +msgstr "" + +#: ../../source/ref-changelog.md:334 +msgid "" +"The C++ code example has received a substantial update to make it " +"compatible with the latest version of Flower." +msgstr "" + +#: ../../source/ref-changelog.md:336 +msgid "" +"**General improvements** " +"([#1491](https://github.com/adap/flower/pull/1491), " +"[#1504](https://github.com/adap/flower/pull/1504), " +"[#1506](https://github.com/adap/flower/pull/1506), " +"[#1514](https://github.com/adap/flower/pull/1514), " +"[#1522](https://github.com/adap/flower/pull/1522), " +"[#1523](https://github.com/adap/flower/pull/1523), " +"[#1526](https://github.com/adap/flower/pull/1526), " +"[#1528](https://github.com/adap/flower/pull/1528), " +"[#1547](https://github.com/adap/flower/pull/1547), " +"[#1549](https://github.com/adap/flower/pull/1549), " +"[#1560](https://github.com/adap/flower/pull/1560), " +"[#1564](https://github.com/adap/flower/pull/1564), " +"[#1566](https://github.com/adap/flower/pull/1566))" +msgstr "" + +#: ../../source/ref-changelog.md:340 +msgid "" +"**Updated documentation** " +"([#1494](https://github.com/adap/flower/pull/1494), " +"[#1496](https://github.com/adap/flower/pull/1496), " +"[#1500](https://github.com/adap/flower/pull/1500), " 
+"[#1503](https://github.com/adap/flower/pull/1503), " +"[#1505](https://github.com/adap/flower/pull/1505), " +"[#1524](https://github.com/adap/flower/pull/1524), " +"[#1518](https://github.com/adap/flower/pull/1518), " +"[#1519](https://github.com/adap/flower/pull/1519), " +"[#1515](https://github.com/adap/flower/pull/1515))" +msgstr "" + +#: ../../source/ref-changelog.md:344 +msgid "" +"One highlight is the new [first time contributor " +"guide](https://flower.dev/docs/first-time-contributors.html): if you've " +"never contributed on GitHub before, this is the perfect place to start!" +msgstr "" + +#: ../../source/ref-changelog.md:350 +msgid "v1.1.0 (2022-10-31)" +msgstr "" + +#: ../../source/ref-changelog.md:354 +msgid "" +"We would like to give our **special thanks** to all the contributors who " +"made the new version of Flower possible (in `git shortlog` order):" +msgstr "" + +#: ../../source/ref-changelog.md:356 +msgid "" +"`Akis Linardos`, `Christopher S`, `Daniel J. Beutel`, `George`, `Jan " +"Schlicht`, `Mohammad Fares`, `Pedro Porto Buarque de Gusmão`, `Philipp " +"Wiesner`, `Rob Luke`, `Taner Topal`, `VasundharaAgarwal`, " +"`danielnugraha`, `edogab33`" +msgstr "" + +#: ../../source/ref-changelog.md:360 +msgid "" +"**Introduce Differential Privacy wrappers (preview)** " +"([#1357](https://github.com/adap/flower/pull/1357), " +"[#1460](https://github.com/adap/flower/pull/1460))" +msgstr "" + +#: ../../source/ref-changelog.md:362 +msgid "" +"The first (experimental) preview of pluggable Differential Privacy " +"wrappers enables easy configuration and usage of differential privacy " +"(DP). The pluggable DP wrappers enable framework-agnostic **and** " +"strategy-agnostic usage of both client-side DP and server-side DP. Head " +"over to the Flower docs, a new explainer goes into more detail." 
+msgstr "" + +#: ../../source/ref-changelog.md:364 +msgid "" +"**New iOS CoreML code example** " +"([#1289](https://github.com/adap/flower/pull/1289))" +msgstr "" + +#: ../../source/ref-changelog.md:366 +msgid "" +"Flower goes iOS! A massive new code example shows how Flower clients can " +"be built for iOS. The code example contains both Flower iOS SDK " +"components that can be used for many tasks, and one task example running " +"on CoreML." +msgstr "" + +#: ../../source/ref-changelog.md:368 +msgid "" +"**New FedMedian strategy** " +"([#1461](https://github.com/adap/flower/pull/1461))" +msgstr "" + +#: ../../source/ref-changelog.md:370 +msgid "" +"The new `FedMedian` strategy implements Federated Median (FedMedian) by " +"[Yin et al., 2018](https://arxiv.org/pdf/1803.01498v1.pdf)." +msgstr "" + +#: ../../source/ref-changelog.md:372 +msgid "" +"**Log** `Client` **exceptions in Virtual Client Engine** " +"([#1493](https://github.com/adap/flower/pull/1493))" +msgstr "" + +#: ../../source/ref-changelog.md:374 +msgid "" +"All `Client` exceptions happening in the VCE are now logged by default " +"and not just exposed to the configured `Strategy` (via the `failures` " +"argument)." +msgstr "" + +#: ../../source/ref-changelog.md:376 +msgid "" +"**Improve Virtual Client Engine internals** " +"([#1401](https://github.com/adap/flower/pull/1401), " +"[#1453](https://github.com/adap/flower/pull/1453))" +msgstr "" + +#: ../../source/ref-changelog.md:378 +msgid "" +"Some internals of the Virtual Client Engine have been revamped. The VCE " +"now uses Ray 2.0 under the hood, the value type of the `client_resources`" +" dictionary changed to `float` to allow fractions of resources to be " +"allocated." 
+msgstr "" + +#: ../../source/ref-changelog.md:380 +msgid "" +"**Support optional** `Client`**/**`NumPyClient` **methods in Virtual " +"Client Engine**" +msgstr "" + +#: ../../source/ref-changelog.md:382 +msgid "" +"The Virtual Client Engine now has full support for optional `Client` (and" +" `NumPyClient`) methods." +msgstr "" + +#: ../../source/ref-changelog.md:384 +msgid "" +"**Provide type information to packages using** `flwr` " +"([#1377](https://github.com/adap/flower/pull/1377))" +msgstr "" + +#: ../../source/ref-changelog.md:386 +msgid "" +"The package `flwr` is now bundled with a `py.typed` file indicating that " +"the package is typed. This enables typing support for projects or " +"packages that use `flwr` by enabling them to improve their code using " +"static type checkers like `mypy`." +msgstr "" + +#: ../../source/ref-changelog.md:388 +msgid "" +"**Updated code example** " +"([#1344](https://github.com/adap/flower/pull/1344), " +"[#1347](https://github.com/adap/flower/pull/1347))" +msgstr "" + +#: ../../source/ref-changelog.md:390 +msgid "" +"The code examples covering scikit-learn and PyTorch Lightning have been " +"updated to work with the latest version of Flower." 
+msgstr "" + +#: ../../source/ref-changelog.md:392 +msgid "" +"**Updated documentation** " +"([#1355](https://github.com/adap/flower/pull/1355), " +"[#1558](https://github.com/adap/flower/pull/1558), " +"[#1379](https://github.com/adap/flower/pull/1379), " +"[#1380](https://github.com/adap/flower/pull/1380), " +"[#1381](https://github.com/adap/flower/pull/1381), " +"[#1332](https://github.com/adap/flower/pull/1332), " +"[#1391](https://github.com/adap/flower/pull/1391), " +"[#1403](https://github.com/adap/flower/pull/1403), " +"[#1364](https://github.com/adap/flower/pull/1364), " +"[#1409](https://github.com/adap/flower/pull/1409), " +"[#1419](https://github.com/adap/flower/pull/1419), " +"[#1444](https://github.com/adap/flower/pull/1444), " +"[#1448](https://github.com/adap/flower/pull/1448), " +"[#1417](https://github.com/adap/flower/pull/1417), " +"[#1449](https://github.com/adap/flower/pull/1449), " +"[#1465](https://github.com/adap/flower/pull/1465), " +"[#1467](https://github.com/adap/flower/pull/1467))" +msgstr "" + +#: ../../source/ref-changelog.md:394 +msgid "" +"There have been so many documentation updates that it doesn't even make " +"sense to list them individually." +msgstr "" + +#: ../../source/ref-changelog.md:396 +msgid "" +"**Restructured documentation** " +"([#1387](https://github.com/adap/flower/pull/1387))" +msgstr "" + +#: ../../source/ref-changelog.md:398 +msgid "" +"The documentation has been restructured to make it easier to navigate. " +"This is just the first step in a larger effort to make the Flower " +"documentation the best documentation of any project ever. Stay tuned!" +msgstr "" + +#: ../../source/ref-changelog.md:400 +msgid "" +"**Open in Colab button** " +"([#1389](https://github.com/adap/flower/pull/1389))" +msgstr "" + +#: ../../source/ref-changelog.md:402 +msgid "" +"The four parts of the Flower Federated Learning Tutorial now come with a " +"new `Open in Colab` button. 
No need to install anything on your local " +"machine, you can now use and learn about Flower in your browser, it's " +"only a single click away." +msgstr "" + +#: ../../source/ref-changelog.md:404 +msgid "" +"**Improved tutorial** ([#1468](https://github.com/adap/flower/pull/1468)," +" [#1470](https://github.com/adap/flower/pull/1470), " +"[#1472](https://github.com/adap/flower/pull/1472), " +"[#1473](https://github.com/adap/flower/pull/1473), " +"[#1474](https://github.com/adap/flower/pull/1474), " +"[#1475](https://github.com/adap/flower/pull/1475))" +msgstr "" + +#: ../../source/ref-changelog.md:406 +msgid "" +"The Flower Federated Learning Tutorial has two brand-new parts covering " +"custom strategies (still WIP) and the distinction between `Client` and " +"`NumPyClient`. The existing parts one and two have also been improved " +"(many small changes and fixes)." +msgstr "" + +#: ../../source/ref-changelog.md:412 +msgid "v1.0.0 (2022-07-28)" +msgstr "" + +#: ../../source/ref-changelog.md:414 +msgid "Highlights" +msgstr "" + +#: ../../source/ref-changelog.md:416 +msgid "Stable **Virtual Client Engine** (accessible via `start_simulation`)" +msgstr "" + +#: ../../source/ref-changelog.md:417 +msgid "All `Client`/`NumPyClient` methods are now optional" +msgstr "" + +#: ../../source/ref-changelog.md:418 +msgid "Configurable `get_parameters`" +msgstr "" + +#: ../../source/ref-changelog.md:419 +msgid "" +"Tons of small API cleanups resulting in a more coherent developer " +"experience" +msgstr "" + +#: ../../source/ref-changelog.md:423 +msgid "" +"We would like to give our **special thanks** to all the contributors who " +"made Flower 1.0 possible (in reverse [GitHub " +"Contributors](https://github.com/adap/flower/graphs/contributors) order):" +msgstr "" + +#: ../../source/ref-changelog.md:425 +msgid "" +"[@rtaiello](https://github.com/rtaiello), " +"[@g-pichler](https://github.com/g-pichler), [@rob-" +"luke](https://github.com/rob-luke), 
[@andreea-zaharia](https://github.com" +"/andreea-zaharia), [@kinshukdua](https://github.com/kinshukdua), " +"[@nfnt](https://github.com/nfnt), " +"[@tatiana-s](https://github.com/tatiana-s), " +"[@TParcollet](https://github.com/TParcollet), " +"[@vballoli](https://github.com/vballoli), " +"[@negedng](https://github.com/negedng), " +"[@RISHIKESHAVAN](https://github.com/RISHIKESHAVAN), " +"[@hei411](https://github.com/hei411), " +"[@SebastianSpeitel](https://github.com/SebastianSpeitel), " +"[@AmitChaulwar](https://github.com/AmitChaulwar), " +"[@Rubiel1](https://github.com/Rubiel1), [@FANTOME-PAN](https://github.com" +"/FANTOME-PAN), [@Rono-BC](https://github.com/Rono-BC), " +"[@lbhm](https://github.com/lbhm), " +"[@sishtiaq](https://github.com/sishtiaq), " +"[@remde](https://github.com/remde), [@Jueun-Park](https://github.com" +"/Jueun-Park), [@architjen](https://github.com/architjen), " +"[@PratikGarai](https://github.com/PratikGarai), " +"[@mrinaald](https://github.com/mrinaald), " +"[@zliel](https://github.com/zliel), " +"[@MeiruiJiang](https://github.com/MeiruiJiang), " +"[@sandracl72](https://github.com/sandracl72), " +"[@gubertoli](https://github.com/gubertoli), " +"[@Vingt100](https://github.com/Vingt100), " +"[@MakGulati](https://github.com/MakGulati), " +"[@cozek](https://github.com/cozek), " +"[@jafermarq](https://github.com/jafermarq), " +"[@sisco0](https://github.com/sisco0), " +"[@akhilmathurs](https://github.com/akhilmathurs), " +"[@CanTuerk](https://github.com/CanTuerk), " +"[@mariaboerner1987](https://github.com/mariaboerner1987), " +"[@pedropgusmao](https://github.com/pedropgusmao), " +"[@tanertopal](https://github.com/tanertopal), " +"[@danieljanes](https://github.com/danieljanes)." 
+msgstr "" + +#: ../../source/ref-changelog.md:429 +msgid "" +"**All arguments must be passed as keyword arguments** " +"([#1338](https://github.com/adap/flower/pull/1338))" +msgstr "" + +#: ../../source/ref-changelog.md:431 +msgid "" +"Pass all arguments as keyword arguments, positional arguments are not " +"longer supported. Code that uses positional arguments (e.g., " +"`start_client(\"127.0.0.1:8080\", FlowerClient())`) must add the keyword " +"for each positional argument (e.g., " +"`start_client(server_address=\"127.0.0.1:8080\", " +"client=FlowerClient())`)." +msgstr "" + +#: ../../source/ref-changelog.md:433 +msgid "" +"**Introduce configuration object** `ServerConfig` **in** `start_server` " +"**and** `start_simulation` " +"([#1317](https://github.com/adap/flower/pull/1317))" +msgstr "" + +#: ../../source/ref-changelog.md:435 +msgid "" +"Instead of a config dictionary `{\"num_rounds\": 3, \"round_timeout\": " +"600.0}`, `start_server` and `start_simulation` now expect a configuration" +" object of type `flwr.server.ServerConfig`. `ServerConfig` takes the same" +" arguments that as the previous config dict, but it makes writing type-" +"safe code easier and the default parameters values more transparent." 
+msgstr "" + +#: ../../source/ref-changelog.md:437 +msgid "" +"**Rename built-in strategy parameters for clarity** " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "" + +#: ../../source/ref-changelog.md:439 +msgid "" +"The following built-in strategy parameters were renamed to improve " +"readability and consistency with other API's:" +msgstr "" + +#: ../../source/ref-changelog.md:441 +msgid "`fraction_eval` --> `fraction_evaluate`" +msgstr "" + +#: ../../source/ref-changelog.md:442 +msgid "`min_eval_clients` --> `min_evaluate_clients`" +msgstr "" + +#: ../../source/ref-changelog.md:443 +msgid "`eval_fn` --> `evaluate_fn`" +msgstr "" + +#: ../../source/ref-changelog.md:445 +msgid "" +"**Update default arguments of built-in strategies** " +"([#1278](https://github.com/adap/flower/pull/1278))" +msgstr "" + +#: ../../source/ref-changelog.md:447 +msgid "" +"All built-in strategies now use `fraction_fit=1.0` and " +"`fraction_evaluate=1.0`, which means they select *all* currently " +"available clients for training and evaluation. Projects that relied on " +"the previous default values can get the previous behaviour by " +"initializing the strategy in the following way:" +msgstr "" + +#: ../../source/ref-changelog.md:449 +msgid "`strategy = FedAvg(fraction_fit=0.1, fraction_evaluate=0.1)`" +msgstr "" + +#: ../../source/ref-changelog.md:451 +msgid "" +"**Add** `server_round` **to** `Strategy.evaluate` " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "" + +#: ../../source/ref-changelog.md:453 +msgid "" +"The `Strategy` method `evaluate` now receives the current round of " +"federated learning/evaluation as the first parameter." 
+msgstr "" + +#: ../../source/ref-changelog.md:455 +msgid "" +"**Add** `server_round` **and** `config` **parameters to** `evaluate_fn` " +"([#1334](https://github.com/adap/flower/pull/1334))" +msgstr "" + +#: ../../source/ref-changelog.md:457 +msgid "" +"The `evaluate_fn` passed to built-in strategies like `FedAvg` now takes " +"three parameters: (1) The current round of federated learning/evaluation " +"(`server_round`), (2) the model parameters to evaluate (`parameters`), " +"and (3) a config dictionary (`config`)." +msgstr "" + +#: ../../source/ref-changelog.md:459 +msgid "" +"**Rename** `rnd` **to** `server_round` " +"([#1321](https://github.com/adap/flower/pull/1321))" +msgstr "" + +#: ../../source/ref-changelog.md:461 +msgid "" +"Several Flower methods and functions (`evaluate_fn`, `configure_fit`, " +"`aggregate_fit`, `configure_evaluate`, `aggregate_evaluate`) receive the " +"current round of federated learning/evaluation as their first parameter. " +"To improve reaability and avoid confusion with *random*, this parameter " +"has been renamed from `rnd` to `server_round`." +msgstr "" + +#: ../../source/ref-changelog.md:463 +msgid "" +"**Move** `flwr.dataset` **to** `flwr_baselines` " +"([#1273](https://github.com/adap/flower/pull/1273))" +msgstr "" + +#: ../../source/ref-changelog.md:465 +msgid "The experimental package `flwr.dataset` was migrated to Flower Baselines." +msgstr "" + +#: ../../source/ref-changelog.md:467 +msgid "" +"**Remove experimental strategies** " +"([#1280](https://github.com/adap/flower/pull/1280))" +msgstr "" + +#: ../../source/ref-changelog.md:469 +msgid "" +"Remove unmaintained experimental strategies (`FastAndSlow`, `FedFSv0`, " +"`FedFSv1`)." 
+msgstr "" + +#: ../../source/ref-changelog.md:471 +msgid "" +"**Rename** `Weights` **to** `NDArrays` " +"([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" + +#: ../../source/ref-changelog.md:473 +msgid "" +"`flwr.common.Weights` was renamed to `flwr.common.NDArrays` to better " +"capture what this type is all about." +msgstr "" + +#: ../../source/ref-changelog.md:475 +msgid "" +"**Remove antiquated** `force_final_distributed_eval` **from** " +"`start_server` ([#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" + +#: ../../source/ref-changelog.md:477 +msgid "" +"The `start_server` parameter `force_final_distributed_eval` has long been" +" a historic artefact, in this release it is finally gone for good." +msgstr "" + +#: ../../source/ref-changelog.md:479 +msgid "" +"**Make** `get_parameters` **configurable** " +"([#1242](https://github.com/adap/flower/pull/1242))" +msgstr "" + +#: ../../source/ref-changelog.md:481 +msgid "" +"The `get_parameters` method now accepts a configuration dictionary, just " +"like `get_properties`, `fit`, and `evaluate`." +msgstr "" + +#: ../../source/ref-changelog.md:483 +msgid "" +"**Replace** `num_rounds` **in** `start_simulation` **with new** `config` " +"**parameter** ([#1281](https://github.com/adap/flower/pull/1281))" +msgstr "" + +#: ../../source/ref-changelog.md:485 +msgid "" +"The `start_simulation` function now accepts a configuration dictionary " +"`config` instead of the `num_rounds` integer. This improves the " +"consistency between `start_simulation` and `start_server` and makes " +"transitioning between the two easier." 
+msgstr "" + +#: ../../source/ref-changelog.md:489 +msgid "" +"**Support Python 3.10** " +"([#1320](https://github.com/adap/flower/pull/1320))" +msgstr "" + +#: ../../source/ref-changelog.md:491 +msgid "" +"The previous Flower release introduced experimental support for Python " +"3.10, this release declares Python 3.10 support as stable." +msgstr "" + +#: ../../source/ref-changelog.md:493 +msgid "" +"**Make all** `Client` **and** `NumPyClient` **methods optional** " +"([#1260](https://github.com/adap/flower/pull/1260), " +"[#1277](https://github.com/adap/flower/pull/1277))" +msgstr "" + +#: ../../source/ref-changelog.md:495 +msgid "" +"The `Client`/`NumPyClient` methods `get_properties`, `get_parameters`, " +"`fit`, and `evaluate` are all optional. This enables writing clients that" +" implement, for example, only `fit`, but no other method. No need to " +"implement `evaluate` when using centralized evaluation!" +msgstr "" + +#: ../../source/ref-changelog.md:497 +msgid "" +"**Enable passing a** `Server` **instance to** `start_simulation` " +"([#1281](https://github.com/adap/flower/pull/1281))" +msgstr "" + +#: ../../source/ref-changelog.md:499 +msgid "" +"Similar to `start_server`, `start_simulation` now accepts a full `Server`" +" instance. This enables users to heavily customize the execution of " +"eperiments and opens the door to running, for example, async FL using the" +" Virtual Client Engine." 
+msgstr "" + +#: ../../source/ref-changelog.md:501 +msgid "" +"**Update code examples** " +"([#1291](https://github.com/adap/flower/pull/1291), " +"[#1286](https://github.com/adap/flower/pull/1286), " +"[#1282](https://github.com/adap/flower/pull/1282))" +msgstr "" + +#: ../../source/ref-changelog.md:503 +msgid "" +"Many code examples received small or even large maintenance updates, " +"among them are" +msgstr "" + +#: ../../source/ref-changelog.md:505 +msgid "`scikit-learn`" +msgstr "" + +#: ../../source/ref-changelog.md:506 +msgid "`simulation_pytorch`" +msgstr "" + +#: ../../source/ref-changelog.md:507 +msgid "`quickstart_pytorch`" +msgstr "" + +#: ../../source/ref-changelog.md:508 +msgid "`quickstart_simulation`" +msgstr "" + +#: ../../source/ref-changelog.md:509 +msgid "`quickstart_tensorflow`" +msgstr "" + +#: ../../source/ref-changelog.md:510 +msgid "`advanced_tensorflow`" +msgstr "" + +#: ../../source/ref-changelog.md:512 +msgid "" +"**Remove the obsolete simulation example** " +"([#1328](https://github.com/adap/flower/pull/1328))" +msgstr "" + +#: ../../source/ref-changelog.md:514 +msgid "" +"Removes the obsolete `simulation` example and renames " +"`quickstart_simulation` to `simulation_tensorflow` so it fits withs the " +"naming of `simulation_pytorch`" +msgstr "" + +#: ../../source/ref-changelog.md:516 +msgid "" +"**Update documentation** " +"([#1223](https://github.com/adap/flower/pull/1223), " +"[#1209](https://github.com/adap/flower/pull/1209), " +"[#1251](https://github.com/adap/flower/pull/1251), " +"[#1257](https://github.com/adap/flower/pull/1257), " +"[#1267](https://github.com/adap/flower/pull/1267), " +"[#1268](https://github.com/adap/flower/pull/1268), " +"[#1300](https://github.com/adap/flower/pull/1300), " +"[#1304](https://github.com/adap/flower/pull/1304), " +"[#1305](https://github.com/adap/flower/pull/1305), " +"[#1307](https://github.com/adap/flower/pull/1307))" +msgstr "" + +#: ../../source/ref-changelog.md:518 +msgid "" +"One 
substantial documentation update fixes multiple smaller rendering " +"issues, makes titles more succinct to improve navigation, removes a " +"deprecated library, updates documentation dependencies, includes the " +"`flwr.common` module in the API reference, includes support for markdown-" +"based documentation, migrates the changelog from `.rst` to `.md`, and " +"fixes a number of smaller details!" +msgstr "" + +#: ../../source/ref-changelog.md:520 ../../source/ref-changelog.md:575 +#: ../../source/ref-changelog.md:644 ../../source/ref-changelog.md:683 +msgid "**Minor updates**" +msgstr "" + +#: ../../source/ref-changelog.md:522 +msgid "" +"Add round number to fit and evaluate log messages " +"([#1266](https://github.com/adap/flower/pull/1266))" +msgstr "" + +#: ../../source/ref-changelog.md:523 +msgid "" +"Add secure gRPC connection to the `advanced_tensorflow` code example " +"([#847](https://github.com/adap/flower/pull/847))" +msgstr "" + +#: ../../source/ref-changelog.md:524 +msgid "" +"Update developer tooling " +"([#1231](https://github.com/adap/flower/pull/1231), " +"[#1276](https://github.com/adap/flower/pull/1276), " +"[#1301](https://github.com/adap/flower/pull/1301), " +"[#1310](https://github.com/adap/flower/pull/1310))" +msgstr "" + +#: ../../source/ref-changelog.md:525 +msgid "" +"Rename ProtoBuf messages to improve consistency " +"([#1214](https://github.com/adap/flower/pull/1214), " +"[#1258](https://github.com/adap/flower/pull/1258), " +"[#1259](https://github.com/adap/flower/pull/1259))" +msgstr "" + +#: ../../source/ref-changelog.md:527 +msgid "v0.19.0 (2022-05-18)" +msgstr "" + +#: ../../source/ref-changelog.md:531 +msgid "" +"**Flower Baselines (preview): FedOpt, FedBN, FedAvgM** " +"([#919](https://github.com/adap/flower/pull/919), " +"[#1127](https://github.com/adap/flower/pull/1127), " +"[#914](https://github.com/adap/flower/pull/914))" +msgstr "" + +#: ../../source/ref-changelog.md:533 +msgid "" +"The first preview release of Flower 
Baselines has arrived! We're " +"kickstarting Flower Baselines with implementations of FedOpt (FedYogi, " +"FedAdam, FedAdagrad), FedBN, and FedAvgM. Check the documentation on how " +"to use [Flower Baselines](https://flower.dev/docs/using-baselines.html). " +"With this first preview release we're also inviting the community to " +"[contribute their own baselines](https://flower.dev/docs/contributing-" +"baselines.html)." +msgstr "" + +#: ../../source/ref-changelog.md:535 +msgid "" +"**C++ client SDK (preview) and code example** " +"([#1111](https://github.com/adap/flower/pull/1111))" +msgstr "" + +#: ../../source/ref-changelog.md:537 +msgid "" +"Preview support for Flower clients written in C++. The C++ preview " +"includes a Flower client SDK and a quickstart code example that " +"demonstrates a simple C++ client using the SDK." +msgstr "" + +#: ../../source/ref-changelog.md:539 +msgid "" +"**Add experimental support for Python 3.10 and Python 3.11** " +"([#1135](https://github.com/adap/flower/pull/1135))" +msgstr "" + +#: ../../source/ref-changelog.md:541 +msgid "" +"Python 3.10 is the latest stable release of Python and Python 3.11 is due" +" to be released in October. This Flower release adds experimental support" +" for both Python versions." +msgstr "" + +#: ../../source/ref-changelog.md:543 +msgid "" +"**Aggregate custom metrics through user-provided functions** " +"([#1144](https://github.com/adap/flower/pull/1144))" +msgstr "" + +#: ../../source/ref-changelog.md:545 +msgid "" +"Custom metrics (e.g., `accuracy`) can now be aggregated without having to" +" customize the strategy. Built-in strategies support two new arguments, " +"`fit_metrics_aggregation_fn` and `evaluate_metrics_aggregation_fn`, that " +"allow passing custom metric aggregation functions." 
+msgstr "" + +#: ../../source/ref-changelog.md:547 +msgid "" +"**User-configurable round timeout** " +"([#1162](https://github.com/adap/flower/pull/1162))" +msgstr "" + +#: ../../source/ref-changelog.md:549 +msgid "" +"A new configuration value allows the round timeout to be set for " +"`start_server` and `start_simulation`. If the `config` dictionary " +"contains a `round_timeout` key (with a `float` value in seconds), the " +"server will wait *at least* `round_timeout` seconds before it closes the " +"connection." +msgstr "" + +#: ../../source/ref-changelog.md:551 +msgid "" +"**Enable both federated evaluation and centralized evaluation to be used " +"at the same time in all built-in strategies** " +"([#1091](https://github.com/adap/flower/pull/1091))" +msgstr "" + +#: ../../source/ref-changelog.md:553 +msgid "" +"Built-in strategies can now perform both federated evaluation (i.e., " +"client-side) and centralized evaluation (i.e., server-side) in the same " +"round. Federated evaluation can be disabled by setting `fraction_eval` to" +" `0.0`." 
+msgstr "" + +#: ../../source/ref-changelog.md:555 +msgid "" +"**Two new Jupyter Notebook tutorials** " +"([#1141](https://github.com/adap/flower/pull/1141))" +msgstr "" + +#: ../../source/ref-changelog.md:557 +msgid "" +"Two Jupyter Notebook tutorials (compatible with Google Colab) explain " +"basic and intermediate Flower features:" +msgstr "" + +#: ../../source/ref-changelog.md:559 +msgid "" +"*An Introduction to Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-1" +"-Intro-to-FL-PyTorch.ipynb)" +msgstr "" + +#: ../../source/ref-changelog.md:561 +msgid "" +"*Using Strategies in Federated Learning*: [Open in " +"Colab](https://colab.research.google.com/github/adap/flower/blob/main/tutorials/Flower-2" +"-Strategies-in-FL-PyTorch.ipynb)" +msgstr "" + +#: ../../source/ref-changelog.md:563 +msgid "" +"**New FedAvgM strategy (Federated Averaging with Server Momentum)** " +"([#1076](https://github.com/adap/flower/pull/1076))" +msgstr "" + +#: ../../source/ref-changelog.md:565 +msgid "" +"The new `FedAvgM` strategy implements Federated Averaging with Server " +"Momentum \\[Hsu et al., 2019\\]." +msgstr "" + +#: ../../source/ref-changelog.md:567 +msgid "" +"**New advanced PyTorch code example** " +"([#1007](https://github.com/adap/flower/pull/1007))" +msgstr "" + +#: ../../source/ref-changelog.md:569 +msgid "" +"A new code example (`advanced_pytorch`) demonstrates advanced Flower " +"concepts with PyTorch." +msgstr "" + +#: ../../source/ref-changelog.md:571 +msgid "" +"**New JAX code example** " +"([#906](https://github.com/adap/flower/pull/906), " +"[#1143](https://github.com/adap/flower/pull/1143))" +msgstr "" + +#: ../../source/ref-changelog.md:573 +msgid "" +"A new code example (`jax_from_centralized_to_federated`) shows federated " +"learning with JAX and Flower." 
+msgstr "" + +#: ../../source/ref-changelog.md:577 +msgid "" +"New option to keep Ray running if Ray was already initialized in " +"`start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))" +msgstr "" + +#: ../../source/ref-changelog.md:578 +msgid "" +"Add support for custom `ClientManager` as a `start_simulation` parameter " +"([#1171](https://github.com/adap/flower/pull/1171))" +msgstr "" + +#: ../../source/ref-changelog.md:579 +msgid "" +"New documentation for [implementing " +"strategies](https://flower.dev/docs/framework/how-to-implement-" +"strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), " +"[#1175](https://github.com/adap/flower/pull/1175))" +msgstr "" + +#: ../../source/ref-changelog.md:580 +msgid "" +"New mobile-friendly documentation theme " +"([#1174](https://github.com/adap/flower/pull/1174))" +msgstr "" + +#: ../../source/ref-changelog.md:581 +msgid "" +"Limit version range for (optional) `ray` dependency to include only " +"compatible releases (`>=1.9.2,<1.12.0`) " +"([#1205](https://github.com/adap/flower/pull/1205))" +msgstr "" + +#: ../../source/ref-changelog.md:585 +msgid "" +"**Remove deprecated support for Python 3.6** " +"([#871](https://github.com/adap/flower/pull/871))" +msgstr "" + +#: ../../source/ref-changelog.md:586 +msgid "" +"**Remove deprecated KerasClient** " +"([#857](https://github.com/adap/flower/pull/857))" +msgstr "" + +#: ../../source/ref-changelog.md:587 +msgid "" +"**Remove deprecated no-op extra installs** " +"([#973](https://github.com/adap/flower/pull/973))" +msgstr "" + +#: ../../source/ref-changelog.md:588 +msgid "" +"**Remove deprecated proto fields from** `FitRes` **and** `EvaluateRes` " +"([#869](https://github.com/adap/flower/pull/869))" +msgstr "" + +#: ../../source/ref-changelog.md:589 +msgid "" +"**Remove deprecated QffedAvg strategy (replaced by QFedAvg)** " +"([#1107](https://github.com/adap/flower/pull/1107))" +msgstr "" + +#: ../../source/ref-changelog.md:590 +msgid "" 
+"**Remove deprecated DefaultStrategy strategy** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:591 +msgid "" +"**Remove deprecated support for eval_fn accuracy return value** " +"([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:592 +msgid "" +"**Remove deprecated support for passing initial parameters as NumPy " +"ndarrays** ([#1142](https://github.com/adap/flower/pull/1142))" +msgstr "" + +#: ../../source/ref-changelog.md:594 +msgid "v0.18.0 (2022-02-28)" +msgstr "" + +#: ../../source/ref-changelog.md:598 +msgid "" +"**Improved Virtual Client Engine compatibility with Jupyter Notebook / " +"Google Colab** ([#866](https://github.com/adap/flower/pull/866), " +"[#872](https://github.com/adap/flower/pull/872), " +"[#833](https://github.com/adap/flower/pull/833), " +"[#1036](https://github.com/adap/flower/pull/1036))" +msgstr "" + +#: ../../source/ref-changelog.md:600 +msgid "" +"Simulations (using the Virtual Client Engine through `start_simulation`) " +"now work more smoothly on Jupyter Notebooks (incl. Google Colab) after " +"installing Flower with the `simulation` extra (`pip install " +"flwr[simulation]`)." +msgstr "" + +#: ../../source/ref-changelog.md:602 +msgid "" +"**New Jupyter Notebook code example** " +"([#833](https://github.com/adap/flower/pull/833))" +msgstr "" + +#: ../../source/ref-changelog.md:604 +msgid "" +"A new code example (`quickstart_simulation`) demonstrates Flower " +"simulations using the Virtual Client Engine through Jupyter Notebook " +"(incl. Google Colab)." +msgstr "" + +#: ../../source/ref-changelog.md:606 +msgid "" +"**Client properties (feature preview)** " +"([#795](https://github.com/adap/flower/pull/795))" +msgstr "" + +#: ../../source/ref-changelog.md:608 +msgid "" +"Clients can implement a new method `get_properties` to enable server-side" +" strategies to query client properties." 
+msgstr "" + +#: ../../source/ref-changelog.md:610 +msgid "" +"**Experimental Android support with TFLite** " +"([#865](https://github.com/adap/flower/pull/865))" +msgstr "" + +#: ../../source/ref-changelog.md:612 +msgid "" +"Android support has finally arrived in `main`! Flower is both client-" +"agnostic and framework-agnostic by design. One can integrate arbitrary " +"client platforms and with this release, using Flower on Android has " +"become a lot easier." +msgstr "" + +#: ../../source/ref-changelog.md:614 +msgid "" +"The example uses TFLite on the client side, along with a new " +"`FedAvgAndroid` strategy. The Android client and `FedAvgAndroid` are " +"still experimental, but they are a first step towards a fully-fledged " +"Android SDK and a unified `FedAvg` implementation that integrated the new" +" functionality from `FedAvgAndroid`." +msgstr "" + +#: ../../source/ref-changelog.md:616 +msgid "" +"**Make gRPC keepalive time user-configurable and decrease default " +"keepalive time** ([#1069](https://github.com/adap/flower/pull/1069))" +msgstr "" + +#: ../../source/ref-changelog.md:618 +msgid "" +"The default gRPC keepalive time has been reduced to increase the " +"compatibility of Flower with more cloud environments (for example, " +"Microsoft Azure). Users can configure the keepalive time to customize the" +" gRPC stack based on specific requirements." +msgstr "" + +#: ../../source/ref-changelog.md:620 +msgid "" +"**New differential privacy example using Opacus and PyTorch** " +"([#805](https://github.com/adap/flower/pull/805))" +msgstr "" + +#: ../../source/ref-changelog.md:622 +msgid "" +"A new code example (`opacus`) demonstrates differentially-private " +"federated learning with Opacus, PyTorch, and Flower." 
+msgstr "" + +#: ../../source/ref-changelog.md:624 +msgid "" +"**New Hugging Face Transformers code example** " +"([#863](https://github.com/adap/flower/pull/863))" +msgstr "" + +#: ../../source/ref-changelog.md:626 +msgid "" +"A new code example (`quickstart_huggingface`) demonstrates usage of " +"Hugging Face Transformers with Flower." +msgstr "" + +#: ../../source/ref-changelog.md:628 +msgid "" +"**New MLCube code example** " +"([#779](https://github.com/adap/flower/pull/779), " +"[#1034](https://github.com/adap/flower/pull/1034), " +"[#1065](https://github.com/adap/flower/pull/1065), " +"[#1090](https://github.com/adap/flower/pull/1090))" +msgstr "" + +#: ../../source/ref-changelog.md:630 +msgid "" +"A new code example (`quickstart_mlcube`) demonstrates usage of MLCube " +"with Flower." +msgstr "" + +#: ../../source/ref-changelog.md:632 +msgid "" +"**SSL-enabled server and client** " +"([#842](https://github.com/adap/flower/pull/842), " +"[#844](https://github.com/adap/flower/pull/844), " +"[#845](https://github.com/adap/flower/pull/845), " +"[#847](https://github.com/adap/flower/pull/847), " +"[#993](https://github.com/adap/flower/pull/993), " +"[#994](https://github.com/adap/flower/pull/994))" +msgstr "" + +#: ../../source/ref-changelog.md:634 +msgid "" +"SSL enables secure encrypted connections between clients and servers. " +"This release open-sources the Flower secure gRPC implementation to make " +"encrypted communication channels accessible to all Flower users." +msgstr "" + +#: ../../source/ref-changelog.md:636 +msgid "" +"**Updated** `FedAdam` **and** `FedYogi` **strategies** " +"([#885](https://github.com/adap/flower/pull/885), " +"[#895](https://github.com/adap/flower/pull/895))" +msgstr "" + +#: ../../source/ref-changelog.md:638 +msgid "" +"`FedAdam` and `FedAdam` match the latest version of the Adaptive " +"Federated Optimization paper." 
+msgstr "" + +#: ../../source/ref-changelog.md:640 +msgid "" +"**Initialize** `start_simulation` **with a list of client IDs** " +"([#860](https://github.com/adap/flower/pull/860))" +msgstr "" + +#: ../../source/ref-changelog.md:642 +msgid "" +"`start_simulation` can now be called with a list of client IDs " +"(`clients_ids`, type: `List[str]`). Those IDs will be passed to the " +"`client_fn` whenever a client needs to be initialized, which can make it " +"easier to load data partitions that are not accessible through `int` " +"identifiers." +msgstr "" + +#: ../../source/ref-changelog.md:646 +msgid "" +"Update `num_examples` calculation in PyTorch code examples in " +"([#909](https://github.com/adap/flower/pull/909))" +msgstr "" + +#: ../../source/ref-changelog.md:647 +msgid "" +"Expose Flower version through `flwr.__version__` " +"([#952](https://github.com/adap/flower/pull/952))" +msgstr "" + +#: ../../source/ref-changelog.md:648 +msgid "" +"`start_server` in `app.py` now returns a `History` object containing " +"metrics from training ([#974](https://github.com/adap/flower/pull/974))" +msgstr "" + +#: ../../source/ref-changelog.md:649 +msgid "" +"Make `max_workers` (used by `ThreadPoolExecutor`) configurable " +"([#978](https://github.com/adap/flower/pull/978))" +msgstr "" + +#: ../../source/ref-changelog.md:650 +msgid "" +"Increase sleep time after server start to three seconds in all code " +"examples ([#1086](https://github.com/adap/flower/pull/1086))" +msgstr "" + +#: ../../source/ref-changelog.md:651 +msgid "" +"Added a new FAQ section to the documentation " +"([#948](https://github.com/adap/flower/pull/948))" +msgstr "" + +#: ../../source/ref-changelog.md:652 +msgid "" +"And many more under-the-hood changes, library updates, documentation " +"changes, and tooling improvements!" 
+msgstr "" + +#: ../../source/ref-changelog.md:656 +msgid "" +"**Removed** `flwr_example` **and** `flwr_experimental` **from release " +"build** ([#869](https://github.com/adap/flower/pull/869))" +msgstr "" + +#: ../../source/ref-changelog.md:658 +msgid "" +"The packages `flwr_example` and `flwr_experimental` have been deprecated " +"since Flower 0.12.0 and they are not longer included in Flower release " +"builds. The associated extras (`baseline`, `examples-pytorch`, `examples-" +"tensorflow`, `http-logger`, `ops`) are now no-op and will be removed in " +"an upcoming release." +msgstr "" + +#: ../../source/ref-changelog.md:660 +msgid "v0.17.0 (2021-09-24)" +msgstr "" + +#: ../../source/ref-changelog.md:664 +msgid "" +"**Experimental virtual client engine** " +"([#781](https://github.com/adap/flower/pull/781) " +"[#790](https://github.com/adap/flower/pull/790) " +"[#791](https://github.com/adap/flower/pull/791))" +msgstr "" + +#: ../../source/ref-changelog.md:666 +msgid "" +"One of Flower's goals is to enable research at scale. This release " +"enables a first (experimental) peek at a major new feature, codenamed the" +" virtual client engine. Virtual clients enable simulations that scale to " +"a (very) large number of clients on a single machine or compute cluster. " +"The easiest way to test the new functionality is to look at the two new " +"code examples called `quickstart_simulation` and `simulation_pytorch`." +msgstr "" + +#: ../../source/ref-changelog.md:668 +msgid "" +"The feature is still experimental, so there's no stability guarantee for " +"the API. It's also not quite ready for prime time and comes with a few " +"known caveats. However, those who are curious are encouraged to try it " +"out and share their thoughts." 
+msgstr "" + +#: ../../source/ref-changelog.md:670 +msgid "" +"**New built-in strategies** " +"([#828](https://github.com/adap/flower/pull/828) " +"[#822](https://github.com/adap/flower/pull/822))" +msgstr "" + +#: ../../source/ref-changelog.md:672 +msgid "" +"FedYogi - Federated learning strategy using Yogi on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "" + +#: ../../source/ref-changelog.md:673 +msgid "" +"FedAdam - Federated learning strategy using Adam on server-side. " +"Implementation based on https://arxiv.org/abs/2003.00295" +msgstr "" + +#: ../../source/ref-changelog.md:675 +msgid "" +"**New PyTorch Lightning code example** " +"([#617](https://github.com/adap/flower/pull/617))" +msgstr "" + +#: ../../source/ref-changelog.md:677 +msgid "" +"**New Variational Auto-Encoder code example** " +"([#752](https://github.com/adap/flower/pull/752))" +msgstr "" + +#: ../../source/ref-changelog.md:679 +msgid "" +"**New scikit-learn code example** " +"([#748](https://github.com/adap/flower/pull/748))" +msgstr "" + +#: ../../source/ref-changelog.md:681 +msgid "" +"**New experimental TensorBoard strategy** " +"([#789](https://github.com/adap/flower/pull/789))" +msgstr "" + +#: ../../source/ref-changelog.md:685 +msgid "" +"Improved advanced TensorFlow code example " +"([#769](https://github.com/adap/flower/pull/769))" +msgstr "" + +#: ../../source/ref-changelog.md:686 +msgid "" +"Warning when `min_available_clients` is misconfigured " +"([#830](https://github.com/adap/flower/pull/830))" +msgstr "" + +#: ../../source/ref-changelog.md:687 +msgid "" +"Improved gRPC server docs " +"([#841](https://github.com/adap/flower/pull/841))" +msgstr "" + +#: ../../source/ref-changelog.md:688 +msgid "" +"Improved error message in `NumPyClient` " +"([#851](https://github.com/adap/flower/pull/851))" +msgstr "" + +#: ../../source/ref-changelog.md:689 +msgid "" +"Improved PyTorch quickstart code example " 
+"([#852](https://github.com/adap/flower/pull/852))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:693
+msgid ""
+"**Disabled final distributed evaluation** "
+"([#800](https://github.com/adap/flower/pull/800))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:695
+msgid ""
+"Prior behaviour was to perform a final round of distributed evaluation on"
+" all connected clients, which is often not required (e.g., when using "
+"server-side evaluation). The prior behaviour can be enabled by passing "
+"`force_final_distributed_eval=True` to `start_server`."
+msgstr ""
+
+#: ../../source/ref-changelog.md:697
+msgid ""
+"**Renamed q-FedAvg strategy** "
+"([#802](https://github.com/adap/flower/pull/802))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:699
+msgid ""
+"The strategy named `QffedAvg` was renamed to `QFedAvg` to better reflect "
+"the notation given in the original paper (q-FFL is the optimization "
+"objective, q-FedAvg is the proposed solver). Note that the original (now "
+"deprecated) `QffedAvg` class is still available for compatibility reasons"
+" (it will be removed in a future release)."
+msgstr ""
+
+#: ../../source/ref-changelog.md:701
+msgid ""
+"**Deprecated and renamed code example** `simulation_pytorch` **to** "
+"`simulation_pytorch_legacy` "
+"([#791](https://github.com/adap/flower/pull/791))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:703
+msgid ""
+"This example has been replaced by a new example. The new example is based"
+" on the experimental virtual client engine, which will become the new "
+"default way of doing most types of large-scale simulations in Flower. The"
+" existing example was kept for reference purposes, but it might be "
+"removed in the future."
+msgstr ""
+
+#: ../../source/ref-changelog.md:705
+msgid "v0.16.0 (2021-05-11)"
+msgstr ""
+
+#: ../../source/ref-changelog.md:709
+msgid ""
+"**New built-in strategies** "
+"([#549](https://github.com/adap/flower/pull/549))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:711
+msgid "(abstract) FedOpt"
+msgstr ""
+
+#: ../../source/ref-changelog.md:712
+msgid "FedAdagrad"
+msgstr ""
+
+#: ../../source/ref-changelog.md:714
+msgid ""
+"**Custom metrics for server and strategies** "
+"([#717](https://github.com/adap/flower/pull/717))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:716
+msgid ""
+"The Flower server is now fully task-agnostic, all remaining instances of "
+"task-specific metrics (such as `accuracy`) have been replaced by custom "
+"metrics dictionaries. Flower 0.15 introduced the capability to pass a "
+"dictionary containing custom metrics from client to server. As of this "
+"release, custom metrics replace task-specific metrics on the server."
+msgstr ""
+
+#: ../../source/ref-changelog.md:718
+msgid ""
+"Custom metric dictionaries are now used in two user-facing APIs: they are"
+" returned from Strategy methods `aggregate_fit`/`aggregate_evaluate` and "
+"they enable evaluation functions passed to built-in strategies (via "
+"`eval_fn`) to return more than two evaluation metrics. Strategies can "
+"even return *aggregated* metrics dictionaries for the server to keep "
+"track of."
+msgstr ""
+
+#: ../../source/ref-changelog.md:720
+msgid ""
+"Strategy implementations should migrate their `aggregate_fit` and "
+"`aggregate_evaluate` methods to the new return type (e.g., by simply "
+"returning an empty `{}`), server-side evaluation functions should migrate"
+" from `return loss, accuracy` to `return loss, {\"accuracy\": accuracy}`."
+msgstr ""
+
+#: ../../source/ref-changelog.md:722
+msgid ""
+"Flower 0.15-style return types are deprecated (but still supported), "
+"compatibility will be removed in a future release."
+msgstr "" + +#: ../../source/ref-changelog.md:724 +msgid "" +"**Migration warnings for deprecated functionality** " +"([#690](https://github.com/adap/flower/pull/690))" +msgstr "" + +#: ../../source/ref-changelog.md:726 +msgid "" +"Earlier versions of Flower were often migrated to new APIs, while " +"maintaining compatibility with legacy APIs. This release introduces " +"detailed warning messages if usage of deprecated APIs is detected. The " +"new warning messages often provide details on how to migrate to more " +"recent APIs, thus easing the transition from one release to another." +msgstr "" + +#: ../../source/ref-changelog.md:728 +msgid "" +"Improved docs and docstrings " +"([#691](https://github.com/adap/flower/pull/691) " +"[#692](https://github.com/adap/flower/pull/692) " +"[#713](https://github.com/adap/flower/pull/713))" +msgstr "" + +#: ../../source/ref-changelog.md:730 +msgid "MXNet example and documentation" +msgstr "" + +#: ../../source/ref-changelog.md:732 +msgid "" +"FedBN implementation in example PyTorch: From Centralized To Federated " +"([#696](https://github.com/adap/flower/pull/696) " +"[#702](https://github.com/adap/flower/pull/702) " +"[#705](https://github.com/adap/flower/pull/705))" +msgstr "" + +#: ../../source/ref-changelog.md:736 +msgid "" +"**Serialization-agnostic server** " +"([#721](https://github.com/adap/flower/pull/721))" +msgstr "" + +#: ../../source/ref-changelog.md:738 +msgid "" +"The Flower server is now fully serialization-agnostic. Prior usage of " +"class `Weights` (which represents parameters as deserialized NumPy " +"ndarrays) was replaced by class `Parameters` (e.g., in `Strategy`). " +"`Parameters` objects are fully serialization-agnostic and represents " +"parameters as byte arrays, the `tensor_type` attributes indicates how " +"these byte arrays should be interpreted (e.g., for " +"serialization/deserialization)." 
+msgstr ""
+
+#: ../../source/ref-changelog.md:740
+msgid ""
+"Built-in strategies implement this approach by handling serialization and"
+" deserialization to/from `Weights` internally. Custom/3rd-party Strategy "
+"implementations should update to the slightly changed Strategy method "
+"definitions. Strategy authors can consult PR "
+"[#721](https://github.com/adap/flower/pull/721) to see how strategies can"
+" easily migrate to the new format."
+msgstr ""
+
+#: ../../source/ref-changelog.md:742
+msgid ""
+"Deprecated `flwr.server.Server.evaluate`, use "
+"`flwr.server.Server.evaluate_round` instead "
+"([#717](https://github.com/adap/flower/pull/717))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:744
+msgid "v0.15.0 (2021-03-12)"
+msgstr ""
+
+#: ../../source/ref-changelog.md:748
+msgid ""
+"**Server-side parameter initialization** "
+"([#658](https://github.com/adap/flower/pull/658))"
+msgstr ""
+
+#: ../../source/ref-changelog.md:750
+msgid ""
+"Model parameters can now be initialized on the server-side. Server-side "
+"parameter initialization works via a new `Strategy` method called "
+"`initialize_parameters`."
+msgstr ""
+
+#: ../../source/ref-changelog.md:752
+msgid ""
+"Built-in strategies support a new constructor argument called "
+"`initial_parameters` to set the initial parameters. Built-in strategies "
+"will provide these initial parameters to the server on startup and then "
+"delete them to free the memory afterwards."
+msgstr ""
+
+#: ../../source/ref-changelog.md:771
+msgid ""
+"If no initial parameters are provided to the strategy, the server will "
+"continue to use the current behaviour (namely, it will ask one of the "
+"connected clients for its parameters and use these as the initial global "
+"parameters)."
+msgstr "" + +#: ../../source/ref-changelog.md:773 +msgid "Deprecations" +msgstr "" + +#: ../../source/ref-changelog.md:775 +msgid "" +"Deprecate `flwr.server.strategy.DefaultStrategy` (migrate to " +"`flwr.server.strategy.FedAvg`, which is equivalent)" +msgstr "" + +#: ../../source/ref-changelog.md:777 +msgid "v0.14.0 (2021-02-18)" +msgstr "" + +#: ../../source/ref-changelog.md:781 +msgid "" +"**Generalized** `Client.fit` **and** `Client.evaluate` **return values** " +"([#610](https://github.com/adap/flower/pull/610) " +"[#572](https://github.com/adap/flower/pull/572) " +"[#633](https://github.com/adap/flower/pull/633))" +msgstr "" + +#: ../../source/ref-changelog.md:783 +msgid "" +"Clients can now return an additional dictionary mapping `str` keys to " +"values of the following types: `bool`, `bytes`, `float`, `int`, `str`. " +"This means one can return almost arbitrary values from `fit`/`evaluate` " +"and make use of them on the server side!" +msgstr "" + +#: ../../source/ref-changelog.md:785 +msgid "" +"This improvement also allowed for more consistent return types between " +"`fit` and `evaluate`: `evaluate` should now return a tuple `(float, int, " +"dict)` representing the loss, number of examples, and a dictionary " +"holding arbitrary problem-specific values like accuracy." +msgstr "" + +#: ../../source/ref-changelog.md:787 +msgid "" +"In case you wondered: this feature is compatible with existing projects, " +"the additional dictionary return value is optional. New code should " +"however migrate to the new return types to be compatible with upcoming " +"Flower releases (`fit`: `List[np.ndarray], int, Dict[str, Scalar]`, " +"`evaluate`: `float, int, Dict[str, Scalar]`). See the example below for " +"details." 
+msgstr "" + +#: ../../source/ref-changelog.md:789 +msgid "" +"*Code example:* note the additional dictionary return values in both " +"`FlwrClient.fit` and `FlwrClient.evaluate`:" +msgstr "" + +#: ../../source/ref-changelog.md:804 +msgid "" +"**Generalized** `config` **argument in** `Client.fit` **and** " +"`Client.evaluate` ([#595](https://github.com/adap/flower/pull/595))" +msgstr "" + +#: ../../source/ref-changelog.md:806 +msgid "" +"The `config` argument used to be of type `Dict[str, str]`, which means " +"that dictionary values were expected to be strings. The new release " +"generalizes this to enable values of the following types: `bool`, " +"`bytes`, `float`, `int`, `str`." +msgstr "" + +#: ../../source/ref-changelog.md:808 +msgid "" +"This means one can now pass almost arbitrary values to `fit`/`evaluate` " +"using the `config` dictionary. Yay, no more `str(epochs)` on the server-" +"side and `int(config[\"epochs\"])` on the client side!" +msgstr "" + +#: ../../source/ref-changelog.md:810 +msgid "" +"*Code example:* note that the `config` dictionary now contains non-`str` " +"values in both `Client.fit` and `Client.evaluate`:" +msgstr "" + +#: ../../source/ref-changelog.md:827 +msgid "v0.13.0 (2021-01-08)" +msgstr "" + +#: ../../source/ref-changelog.md:831 +msgid "" +"New example: PyTorch From Centralized To Federated " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" + +#: ../../source/ref-changelog.md:832 +msgid "Improved documentation" +msgstr "" + +#: ../../source/ref-changelog.md:833 +msgid "New documentation theme ([#551](https://github.com/adap/flower/pull/551))" +msgstr "" + +#: ../../source/ref-changelog.md:834 +msgid "New API reference ([#554](https://github.com/adap/flower/pull/554))" +msgstr "" + +#: ../../source/ref-changelog.md:835 +msgid "" +"Updated examples documentation " +"([#549](https://github.com/adap/flower/pull/549))" +msgstr "" + +#: ../../source/ref-changelog.md:836 +msgid "" +"Removed obsolete documentation " 
+"([#548](https://github.com/adap/flower/pull/548))" +msgstr "" + +#: ../../source/ref-changelog.md:838 +msgid "Bugfix:" +msgstr "" + +#: ../../source/ref-changelog.md:840 +msgid "" +"`Server.fit` does not disconnect clients when finished, disconnecting the" +" clients is now handled in `flwr.server.start_server` " +"([#553](https://github.com/adap/flower/pull/553) " +"[#540](https://github.com/adap/flower/issues/540))." +msgstr "" + +#: ../../source/ref-changelog.md:842 +msgid "v0.12.0 (2020-12-07)" +msgstr "" + +#: ../../source/ref-changelog.md:844 ../../source/ref-changelog.md:860 +msgid "Important changes:" +msgstr "" + +#: ../../source/ref-changelog.md:846 +msgid "" +"Added an example for embedded devices " +"([#507](https://github.com/adap/flower/pull/507))" +msgstr "" + +#: ../../source/ref-changelog.md:847 +msgid "" +"Added a new NumPyClient (in addition to the existing KerasClient) " +"([#504](https://github.com/adap/flower/pull/504) " +"[#508](https://github.com/adap/flower/pull/508))" +msgstr "" + +#: ../../source/ref-changelog.md:848 +msgid "" +"Deprecated `flwr_example` package and started to migrate examples into " +"the top-level `examples` directory " +"([#494](https://github.com/adap/flower/pull/494) " +"[#512](https://github.com/adap/flower/pull/512))" +msgstr "" + +#: ../../source/ref-changelog.md:850 +msgid "v0.11.0 (2020-11-30)" +msgstr "" + +#: ../../source/ref-changelog.md:852 +msgid "Incompatible changes:" +msgstr "" + +#: ../../source/ref-changelog.md:854 +msgid "" +"Renamed strategy methods " +"([#486](https://github.com/adap/flower/pull/486)) to unify the naming of " +"Flower's public APIs. Other public methods/functions (e.g., every method " +"in `Client`, but also `Strategy.evaluate`) do not use the `on_` prefix, " +"which is why we're removing it from the four methods in Strategy. 
To " +"migrate rename the following `Strategy` methods accordingly:" +msgstr "" + +#: ../../source/ref-changelog.md:855 +msgid "`on_configure_evaluate` => `configure_evaluate`" +msgstr "" + +#: ../../source/ref-changelog.md:856 +msgid "`on_aggregate_evaluate` => `aggregate_evaluate`" +msgstr "" + +#: ../../source/ref-changelog.md:857 +msgid "`on_configure_fit` => `configure_fit`" +msgstr "" + +#: ../../source/ref-changelog.md:858 +msgid "`on_aggregate_fit` => `aggregate_fit`" +msgstr "" + +#: ../../source/ref-changelog.md:862 +msgid "" +"Deprecated `DefaultStrategy` " +"([#479](https://github.com/adap/flower/pull/479)). To migrate use " +"`FedAvg` instead." +msgstr "" + +#: ../../source/ref-changelog.md:863 +msgid "" +"Simplified examples and baselines " +"([#484](https://github.com/adap/flower/pull/484))." +msgstr "" + +#: ../../source/ref-changelog.md:864 +msgid "" +"Removed presently unused `on_conclude_round` from strategy interface " +"([#483](https://github.com/adap/flower/pull/483))." +msgstr "" + +#: ../../source/ref-changelog.md:865 +msgid "" +"Set minimal Python version to 3.6.1 instead of 3.6.9 " +"([#471](https://github.com/adap/flower/pull/471))." +msgstr "" + +#: ../../source/ref-changelog.md:866 +msgid "" +"Improved `Strategy` docstrings " +"([#470](https://github.com/adap/flower/pull/470))." +msgstr "" + +#: ../../source/ref-example-projects.rst:2 +msgid "Example projects" +msgstr "" + +#: ../../source/ref-example-projects.rst:4 +msgid "" +"Flower comes with a number of usage examples. The examples demonstrate " +"how Flower can be used to federate different kinds of existing machine " +"learning pipelines, usually leveraging popular machine learning " +"frameworks such as `PyTorch `_ or `TensorFlow " +"`_." +msgstr "" + +#: ../../source/ref-example-projects.rst:11 +msgid "" +"Flower usage examples used to be bundled with Flower in a package called " +"``flwr_example``. 
We are migrating those examples to standalone projects " +"to make them easier to use. All new examples are based in the directory " +"`examples `_." +msgstr "" + +#: ../../source/ref-example-projects.rst:16 +msgid "The following examples are available as standalone projects." +msgstr "" + +#: ../../source/ref-example-projects.rst:20 +msgid "Quickstart TensorFlow/Keras" +msgstr "" + +#: ../../source/ref-example-projects.rst:22 +msgid "" +"The TensorFlow/Keras quickstart example shows CIFAR-10 image " +"classification with MobileNetV2:" +msgstr "" + +#: ../../source/ref-example-projects.rst:25 +msgid "" +"`Quickstart TensorFlow (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:26 +msgid "" +"`Quickstart TensorFlow (Tutorial) `_" +msgstr "" + +#: ../../source/ref-example-projects.rst:27 +msgid "" +"`Quickstart TensorFlow (Blog Post) `_" +msgstr "" + +#: ../../source/ref-example-projects.rst:31 +#: ../../source/tutorial-quickstart-pytorch.rst:5 +msgid "Quickstart PyTorch" +msgstr "" + +#: ../../source/ref-example-projects.rst:33 +msgid "" +"The PyTorch quickstart example shows CIFAR-10 image classification with a" +" simple Convolutional Neural Network:" +msgstr "" + +#: ../../source/ref-example-projects.rst:36 +msgid "" +"`Quickstart PyTorch (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:37 +msgid "" +"`Quickstart PyTorch (Tutorial) `_" +msgstr "" + +#: ../../source/ref-example-projects.rst:41 +msgid "PyTorch: From Centralized To Federated" +msgstr "" + +#: ../../source/ref-example-projects.rst:43 +msgid "" +"This example shows how a regular PyTorch project can be federated using " +"Flower:" +msgstr "" + +#: ../../source/ref-example-projects.rst:45 +msgid "" +"`PyTorch: From Centralized To Federated (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:46 +msgid "" +"`PyTorch: From Centralized To Federated (Tutorial) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:50 +msgid "Federated Learning 
on Raspberry Pi and Nvidia Jetson" +msgstr "" + +#: ../../source/ref-example-projects.rst:52 +msgid "" +"This example shows how Flower can be used to build a federated learning " +"system that run across Raspberry Pi and Nvidia Jetson:" +msgstr "" + +#: ../../source/ref-example-projects.rst:54 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Code) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:55 +msgid "" +"`Federated Learning on Raspberry Pi and Nvidia Jetson (Blog Post) " +"`_" +msgstr "" + +#: ../../source/ref-example-projects.rst:60 +msgid "Legacy Examples (`flwr_example`)" +msgstr "" + +#: ../../source/ref-example-projects.rst:63 +msgid "" +"The useage examples in `flwr_example` are deprecated and will be removed " +"in the future. New examples are provided as standalone projects in " +"`examples `_." +msgstr "" + +#: ../../source/ref-example-projects.rst:69 +msgid "Extra Dependencies" +msgstr "" + +#: ../../source/ref-example-projects.rst:71 +msgid "" +"The core Flower framework keeps a minimal set of dependencies. The " +"examples demonstrate Flower in the context of different machine learning " +"frameworks, so additional dependencies need to be installed before an " +"example can be run." +msgstr "" + +#: ../../source/ref-example-projects.rst:75 +msgid "For PyTorch examples::" +msgstr "" + +#: ../../source/ref-example-projects.rst:79 +msgid "For TensorFlow examples::" +msgstr "" + +#: ../../source/ref-example-projects.rst:83 +msgid "For both PyTorch and TensorFlow examples::" +msgstr "" + +#: ../../source/ref-example-projects.rst:87 +msgid "" +"Please consult :code:`pyproject.toml` for a full list of possible extras " +"(section :code:`[tool.poetry.extras]`)." +msgstr "" + +#: ../../source/ref-example-projects.rst:92 +msgid "PyTorch Examples" +msgstr "" + +#: ../../source/ref-example-projects.rst:94 +msgid "" +"Our PyTorch examples are based on PyTorch 1.7. They should work with " +"other releases as well. 
So far, we provide the following examples." +msgstr "" + +#: ../../source/ref-example-projects.rst:98 +msgid "CIFAR-10 Image Classification" +msgstr "" + +#: ../../source/ref-example-projects.rst:100 +msgid "" +"`CIFAR-10 and CIFAR-100 `_ " +"are popular RGB image datasets. The Flower CIFAR-10 example uses PyTorch " +"to train a simple CNN classifier in a federated learning setup with two " +"clients." +msgstr "" + +#: ../../source/ref-example-projects.rst:104 +#: ../../source/ref-example-projects.rst:121 +#: ../../source/ref-example-projects.rst:146 +msgid "First, start a Flower server:" +msgstr "" + +#: ../../source/ref-example-projects.rst:106 +msgid "$ ./src/py/flwr_example/pytorch_cifar/run-server.sh" +msgstr "" + +#: ../../source/ref-example-projects.rst:108 +#: ../../source/ref-example-projects.rst:125 +#: ../../source/ref-example-projects.rst:150 +msgid "Then, start the two clients in a new terminal window:" +msgstr "" + +#: ../../source/ref-example-projects.rst:110 +msgid "$ ./src/py/flwr_example/pytorch_cifar/run-clients.sh" +msgstr "" + +#: ../../source/ref-example-projects.rst:112 +msgid "For more details, see :code:`src/py/flwr_example/pytorch_cifar`." +msgstr "" + +#: ../../source/ref-example-projects.rst:115 +msgid "ImageNet-2012 Image Classification" +msgstr "" + +#: ../../source/ref-example-projects.rst:117 +msgid "" +"`ImageNet-2012 `_ is one of the major computer" +" vision datasets. The Flower ImageNet example uses PyTorch to train a " +"ResNet-18 classifier in a federated learning setup with ten clients." +msgstr "" + +#: ../../source/ref-example-projects.rst:123 +msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-server.sh" +msgstr "" + +#: ../../source/ref-example-projects.rst:127 +msgid "$ ./src/py/flwr_example/pytorch_imagenet/run-clients.sh" +msgstr "" + +#: ../../source/ref-example-projects.rst:129 +msgid "For more details, see :code:`src/py/flwr_example/pytorch_imagenet`." 
+msgstr "" + +#: ../../source/ref-example-projects.rst:133 +msgid "TensorFlow Examples" +msgstr "" + +#: ../../source/ref-example-projects.rst:135 +msgid "" +"Our TensorFlow examples are based on TensorFlow 2.0 or newer. So far, we " +"provide the following examples." +msgstr "" + +#: ../../source/ref-example-projects.rst:139 +msgid "Fashion-MNIST Image Classification" +msgstr "" + +#: ../../source/ref-example-projects.rst:141 +msgid "" +"`Fashion-MNIST `_ is " +"often used as the \"Hello, world!\" of machine learning. We follow this " +"tradition and provide an example which samples random local datasets from" +" Fashion-MNIST and trains a simple image classification model over those " +"partitions." +msgstr "" + +#: ../../source/ref-example-projects.rst:148 +msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-server.sh" +msgstr "" + +#: ../../source/ref-example-projects.rst:152 +msgid "$ ./src/py/flwr_example/tensorflow_fashion_mnist/run-clients.sh" +msgstr "" + +#: ../../source/ref-example-projects.rst:154 +msgid "" +"For more details, see " +":code:`src/py/flwr_example/tensorflow_fashion_mnist`." +msgstr "" + +#: ../../source/ref-faq.rst:4 +msgid "" +"This page collects answers to commonly asked questions about Federated " +"Learning with Flower." +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can Flower run on Juptyter Notebooks / Google Colab?" +msgstr "" + +#: ../../source/ref-faq.rst:8 +msgid "" +"Yes, it can! Flower even comes with a few under-the-hood optimizations to" +" make it work even better on Colab. Here's a quickstart example:" +msgstr "" + +#: ../../source/ref-faq.rst:10 +msgid "" +"`Flower simulation PyTorch " +"`_" +msgstr "" + +#: ../../source/ref-faq.rst:11 +msgid "" +"`Flower simulation TensorFlow/Keras " +"`_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` How can I run Federated Learning on a Raspberry Pi?" 
+msgstr "" + +#: ../../source/ref-faq.rst:15 +msgid "" +"Find the `blog post about federated learning on embedded device here " +"`_" +" and the corresponding `GitHub code example " +"`_." +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Does Flower support federated learning on Android devices?" +msgstr "" + +#: ../../source/ref-faq.rst:19 +msgid "" +"Yes, it does. Please take a look at our `blog post " +"`_ or check out the code examples:" +msgstr "" + +#: ../../source/ref-faq.rst:21 +msgid "" +"`Android Kotlin example `_" +msgstr "" + +#: ../../source/ref-faq.rst:22 +msgid "`Android Java example `_" +msgstr "" + +#: ../../source/ref-faq.rst +msgid ":fa:`eye,mr-1` Can I combine federated learning with blockchain?" +msgstr "" + +#: ../../source/ref-faq.rst:26 +msgid "" +"Yes, of course. A list of available examples using Flower within a " +"blockchain environment is available here:" +msgstr "" + +#: ../../source/ref-faq.rst:28 +msgid "" +"`Flower meets Nevermined GitHub Repository `_." +msgstr "" + +#: ../../source/ref-faq.rst:29 +msgid "" +"`Flower meets Nevermined YouTube video " +"`_." +msgstr "" + +#: ../../source/ref-faq.rst:30 +msgid "" +"`Flower meets KOSMoS `_." +msgstr "" + +#: ../../source/ref-faq.rst:31 +msgid "" +"`Flower meets Talan blog post `_ ." +msgstr "" + +#: ../../source/ref-faq.rst:32 +msgid "" +"`Flower meets Talan GitHub Repository " +"`_ ." +msgstr "" + +#: ../../source/ref-telemetry.md:1 +msgid "Telemetry" +msgstr "" + +#: ../../source/ref-telemetry.md:3 +msgid "" +"The Flower open-source project collects **anonymous** usage metrics to " +"make well-informed decisions to improve Flower. Doing this enables the " +"Flower team to understand how Flower is used and what challenges users " +"might face." 
+msgstr "" + +#: ../../source/ref-telemetry.md:5 +msgid "" +"**Flower is a friendly framework for collaborative AI and data science.**" +" Staying true to this statement, Flower makes it easy to disable " +"telemetry for users that do not want to share anonymous usage metrics." +msgstr "" + +#: ../../source/ref-telemetry.md:7 +msgid "Principles" +msgstr "" + +#: ../../source/ref-telemetry.md:9 +msgid "We follow strong principles guarding anonymous usage metrics collection:" +msgstr "" + +#: ../../source/ref-telemetry.md:11 +msgid "" +"**Optional:** You will always be able to disable telemetry; read on to " +"learn “[How to opt-out](#how-to-opt-out)”." +msgstr "" + +#: ../../source/ref-telemetry.md:12 +msgid "" +"**Anonymous:** The reported usage metrics are anonymous and do not " +"contain any personally identifiable information (PII). See “[Collected " +"metrics](#collected-metrics)” to understand what metrics are being " +"reported." +msgstr "" + +#: ../../source/ref-telemetry.md:13 +msgid "" +"**Transparent:** You can easily inspect what anonymous metrics are being " +"reported; see the section “[How to inspect what is being reported](#how-" +"to-inspect-what-is-being-reported)”" +msgstr "" + +#: ../../source/ref-telemetry.md:14 +msgid "" +"**Open for feedback:** You can always reach out to us if you have " +"feedback; see the section “[How to contact us](#how-to-contact-us)” for " +"details." +msgstr "" + +#: ../../source/ref-telemetry.md:16 +msgid "How to opt-out" +msgstr "" + +#: ../../source/ref-telemetry.md:18 +msgid "" +"When Flower starts, it will check for an environment variable called " +"`FLWR_TELEMETRY_ENABLED`. Telemetry can easily be disabled by setting " +"`FLWR_TELEMETRY_ENABLED=0`. 
Assuming you are starting a Flower server or " +"client, simply do so by prepending your command as in:" +msgstr "" + +#: ../../source/ref-telemetry.md:24 +msgid "" +"Alternatively, you can export `FLWR_TELEMETRY_ENABLED=0` in, for example," +" `.bashrc` (or whatever configuration file applies to your environment) " +"to disable Flower telemetry permanently." +msgstr "" + +#: ../../source/ref-telemetry.md:26 +msgid "Collected metrics" +msgstr "" + +#: ../../source/ref-telemetry.md:28 +msgid "Flower telemetry collects the following metrics:" +msgstr "" + +#: ../../source/ref-telemetry.md:30 +msgid "" +"**Flower version.** Understand which versions of Flower are currently " +"being used. This helps us to decide whether we should invest effort into " +"releasing a patch version for an older version of Flower or instead use " +"the bandwidth to build new features." +msgstr "" + +#: ../../source/ref-telemetry.md:32 +msgid "" +"**Operating system.** Enables us to answer questions such as: *Should we " +"create more guides for Linux, macOS, or Windows?*" +msgstr "" + +#: ../../source/ref-telemetry.md:34 +msgid "" +"**Python version.** Knowing the Python version helps us, for example, to " +"decide whether we should invest effort into supporting old versions of " +"Python or stop supporting them and start taking advantage of new Python " +"features." +msgstr "" + +#: ../../source/ref-telemetry.md:36 +msgid "" +"**Hardware properties.** Understanding the hardware environment that " +"Flower is being used in helps to decide whether we should, for example, " +"put more effort into supporting low-resource environments." +msgstr "" + +#: ../../source/ref-telemetry.md:38 +msgid "" +"**Execution mode.** Knowing what execution mode Flower starts in enables " +"us to understand how heavily certain features are being used and better " +"prioritize based on that." 
+msgstr "" + +#: ../../source/ref-telemetry.md:40 +msgid "" +"**Cluster.** Flower telemetry assigns a random in-memory cluster ID each " +"time a Flower workload starts. This allows us to understand which device " +"types not only start Flower workloads but also successfully complete " +"them." +msgstr "" + +#: ../../source/ref-telemetry.md:42 +msgid "" +"**Source.** Flower telemetry tries to store a random source ID in " +"`~/.flwr/source` the first time a telemetry event is generated. The " +"source ID is important to identify whether an issue is recurring or " +"whether an issue is triggered by multiple clusters running concurrently " +"(which often happens in simulation). For example, if a device runs " +"multiple workloads at the same time, and this results in an issue, then, " +"in order to reproduce the issue, multiple workloads must be started at " +"the same time." +msgstr "" + +#: ../../source/ref-telemetry.md:44 +msgid "" +"You may delete the source ID at any time. If you wish for all events " +"logged under a specific source ID to be deleted, you can send a deletion " +"request mentioning the source ID to `telemetry@flower.dev`. All events " +"related to that source ID will then be permanently deleted." +msgstr "" + +#: ../../source/ref-telemetry.md:46 +msgid "" +"We will not collect any personally identifiable information. If you think" +" any of the metrics collected could be misused in any way, please [get in" +" touch with us](#how-to-contact-us). We will update this page to reflect " +"any changes to the metrics collected and publish changes in the " +"changelog." +msgstr "" + +#: ../../source/ref-telemetry.md:48 +msgid "" +"If you think other metrics would be helpful for us to better guide our " +"decisions, please let us know! We will carefully review them; if we are " +"confident that they do not compromise user privacy, we may add them." 
+msgstr "" + +#: ../../source/ref-telemetry.md:50 +msgid "How to inspect what is being reported" +msgstr "" + +#: ../../source/ref-telemetry.md:52 +msgid "" +"We wanted to make it very easy for you to inspect what anonymous usage " +"metrics are reported. You can view all the reported telemetry information" +" by setting the environment variable `FLWR_TELEMETRY_LOGGING=1`. Logging " +"is disabled by default. You may use logging independently from " +"`FLWR_TELEMETRY_ENABLED` so that you can inspect the telemetry feature " +"without sending any metrics." +msgstr "" + +#: ../../source/ref-telemetry.md:58 +msgid "" +"The inspect Flower telemetry without sending any anonymous usage metrics," +" use both environment variables:" +msgstr "" + +#: ../../source/ref-telemetry.md:64 +msgid "How to contact us" +msgstr "" + +#: ../../source/ref-telemetry.md:66 +msgid "" +"We want to hear from you. If you have any feedback or ideas on how to " +"improve the way we handle anonymous usage metrics, reach out to us via " +"[Slack](https://flower.dev/join-slack/) (channel `#telemetry`) or email " +"(`telemetry@flower.dev`)." +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an Android " +"app using Flower." +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:5 +msgid "Quickstart Android" +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:10 +msgid "" +"Let's build a federated learning system using TFLite and Flower on " +"Android!" +msgstr "" + +#: ../../source/tutorial-quickstart-android.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ to learn " +"more." +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with FastAI to train a vision model on CIFAR-10." 
+msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:5 +msgid "Quickstart fastai" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:10 +msgid "Let's build a federated learning system using fastai and Flower!" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:-1 +msgid "" +"Check out this Federating Learning quickstart tutorial for using Flower " +"with HuggingFace Transformers in order to fine-tune an LLM." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:5 +msgid "Quickstart 🤗 Transformers" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:10 +msgid "" +"Let's build a federated learning system using Hugging Face Transformers " +"and Flower!" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:12 +msgid "" +"We will leverage Hugging Face to federate the training of language models" +" over multiple clients using Flower. More specifically, we will fine-tune" +" a pre-trained Transformer model (distilBERT) for sequence classification" +" over a dataset of IMDB ratings. The end goal is to detect if a movie " +"rating is positive or negative." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:18 +msgid "Dependencies" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:20 +msgid "" +"To follow along this tutorial you will need to install the following " +"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " +":code:`torch`, and :code:`transformers`. 
This can be done using " +":code:`pip`:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:30 +msgid "Standard Hugging Face workflow" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:33 +msgid "Handling the data" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:35 +msgid "" +"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " +"library. We then need to tokenize the data and create :code:`PyTorch` " +"dataloaders, this is all done in the :code:`load_data` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:81 +msgid "Training and testing the model" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:83 +msgid "" +"Once we have a way of creating our trainloader and testloader, we can " +"take care of the training and testing. This is very similar to any " +":code:`PyTorch` training or testing loop:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:121 +msgid "Creating the model itself" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:123 +msgid "" +"To create the model itself, we will just load the pre-trained distillBERT" +" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:136 +msgid "Federating the example" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:139 +msgid "Creating the IMDBClient" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:141 +msgid "" +"To federate our example to multiple clients, we first need to write our " +"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " +"This is very easy, as our model is a standard :code:`PyTorch` model:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:169 +msgid "" +"The :code:`get_parameters` function lets the server get the client's " +"parameters. 
Inversely, the :code:`set_parameters` function allows the " +"server to send its parameters to the client. Finally, the :code:`fit` " +"function trains the model locally for the client, and the " +":code:`evaluate` function tests the model locally and returns the " +"relevant metrics." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:175 +msgid "Starting the server" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:177 +msgid "" +"Now that we have a way to instantiate clients, we need to create our " +"server in order to aggregate the results. Using Flower, this can be done " +"very easily by first choosing a strategy (here, we are using " +":code:`FedAvg`, which will define the global weights as the average of " +"all the clients' weights at each round) and then using the " +":code:`flwr.server.start_server` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:205 +msgid "" +"The :code:`weighted_average` function is there to provide a way to " +"aggregate the metrics distributed amongst the clients (basically this " +"allows us to display a nice average accuracy and loss for every round)." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:209 +msgid "Putting everything together" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:211 +msgid "We can now start client instances using:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:221 +msgid "" +"And they will be able to connect to the server and start the federated " +"training." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:223 +msgid "" +"If you want to check out everything put together, you should check out " +"the full code example: [https://github.com/adap/flower/tree/main/examples" +"/quickstart-" +"huggingface](https://github.com/adap/flower/tree/main/examples" +"/quickstart-huggingface)." 
+msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:227 +msgid "" +"Of course, this is a very basic example, and a lot can be added or " +"modified, it was just to showcase how simply we could federate a Hugging " +"Face workflow using Flower." +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:230 +msgid "" +"Note that in this example we used :code:`PyTorch`, but we could have very" +" well used :code:`TensorFlow`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:-1 +msgid "" +"Read this Federated Learning quickstart tutorial for creating an iOS app " +"using Flower to train a neural network on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:5 +msgid "Quickstart iOS" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:10 +msgid "" +"In this tutorial we will learn how to train a Neural Network on MNIST " +"using Flower and CoreML on iOS devices." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:12 +msgid "" +"First of all, for running the Flower Python server, it is recommended to " +"create a virtual environment and run everything within a `virtualenv " +"`_. For the Flower " +"client implementation in iOS, it is recommended to use Xcode as our IDE." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:15 +msgid "" +"Our example consists of one Python *server* and two iPhone *clients* that" +" all have the same model." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:17 +msgid "" +"*Clients* are responsible for generating individual weight updates for " +"the model based on their local datasets. These updates are then sent to " +"the *server* which will aggregate them to produce a better model. " +"Finally, the *server* sends this improved version of the model back to " +"each *client*. A complete cycle of weight updates is called a *round*." 
+msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:21 +msgid "" +"Now that we have a rough idea of what is going on, let's get started to " +"setup our Flower server environment. We first need to install Flower. You" +" can do this by using pip:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:27 +msgid "Or Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:36 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training using CoreML as our local training pipeline and " +"MNIST as our dataset. For simplicity reasons we will use the complete " +"Flower client with CoreML, that has been implemented and stored inside " +"the Swift SDK. The client implementation can be seen below:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:72 +msgid "" +"Let's create a new application project in Xcode and add :code:`flwr` as a" +" dependency in your project. For our application, we will store the logic" +" of our app in :code:`FLiOSModel.swift` and the UI elements in " +":code:`ContentView.swift`. We will focus more on :code:`FLiOSModel.swift`" +" in this quickstart. Please refer to the `full code example " +"`_ to learn more " +"about the app." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:75 +msgid "Import Flower and CoreML related packages in :code:`FLiOSModel.swift`:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:83 +msgid "" +"Then add the mlmodel to the project simply by drag-and-drop, the mlmodel " +"will be bundled inside the application during deployment to your iOS " +"device. We need to pass the url to access mlmodel and run CoreML machine " +"learning processes, it can be retrieved by calling the function " +":code:`Bundle.main.url`. For the MNIST dataset, we need to preprocess it " +"into :code:`MLBatchProvider` object. The preprocessing is done inside " +":code:`DataLoader.swift`." 
+msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:99 +msgid "" +"Since CoreML does not allow the model parameters to be seen before " +"training, and accessing the model parameters during or after the training" +" can only be done by specifying the layer name, we need to know this " +"informations beforehand, through looking at the model specification, " +"which are written as proto files. The implementation can be seen in " +":code:`MLModelInspect`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:102 +msgid "" +"After we have all of the necessary informations, let's create our Flower " +"client." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:117 +msgid "" +"Then start the Flower gRPC client and start communicating to the server " +"by passing our Flower client to the function :code:`startFlwrGRPC`." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:124 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +"call the provided :code:`MLFlwrClient` and call :code:`startFlwrGRPC()`. " +"The attribute :code:`hostname` and :code:`port` tells the client which " +"server to connect to. This can be done by entering the hostname and port " +"in the application before clicking the start button to start the " +"federated learning process." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:131 +#: ../../source/tutorial-quickstart-mxnet.rst:226 +#: ../../source/tutorial-quickstart-pytorch.rst:205 +#: ../../source/tutorial-quickstart-tensorflow.rst:100 +msgid "" +"For simple workloads we can start a Flower server and leave all the " +"configuration possibilities at their default values. 
In a file named " +":code:`server.py`, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:142 +#: ../../source/tutorial-quickstart-mxnet.rst:237 +#: ../../source/tutorial-quickstart-pytorch.rst:216 +#: ../../source/tutorial-quickstart-scikitlearn.rst:215 +#: ../../source/tutorial-quickstart-tensorflow.rst:112 +msgid "Train the model, federated!" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:144 +#: ../../source/tutorial-quickstart-pytorch.rst:218 +#: ../../source/tutorial-quickstart-tensorflow.rst:114 +#: ../../source/tutorial-quickstart-xgboost.rst:522 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. FL systems usually have a server and " +"multiple clients. We therefore have to start the server first:" +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:152 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. Build and run the client through your Xcode, one through Xcode" +" Simulator and the other by deploying it to your iPhone. To see more " +"about how to deploy your app to iPhone or Simulator visit `here " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-ios.rst:156 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system in your ios device. The full `source code " +"`_ for this " +"example can be found in :code:`examples/ios`." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Jax to train a linear regression model on a scikit-learn dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-jax.rst:5 +msgid "Quickstart JAX" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with MXNet to train a Sequential model on MNIST." 
+msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:5 +msgid "Quickstart MXNet" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:10 +msgid "" +"In this tutorial, we will learn how to train a :code:`Sequential` model " +"on MNIST using Flower and MXNet." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:12 +#: ../../source/tutorial-quickstart-scikitlearn.rst:12 +msgid "" +"It is recommended to create a virtual environment and run everything " +"within this `virtualenv `_." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:16 +#: ../../source/tutorial-quickstart-scikitlearn.rst:16 +msgid "" +"*Clients* are responsible for generating individual model parameter " +"updates for the model based on their local datasets. These updates are " +"then sent to the *server* which will aggregate them to produce an updated" +" global model. Finally, the *server* sends this improved version of the " +"model back to each *client*. A complete cycle of parameters updates is " +"called a *round*." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:20 +#: ../../source/tutorial-quickstart-scikitlearn.rst:20 +msgid "" +"Now that we have a rough idea of what is going on, let's get started. We " +"first need to install Flower. You can do this by running:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:26 +msgid "Since we want to use MXNet, let's go ahead and install it:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:36 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. Our training " +"procedure and network architecture are based on MXNet´s `Hand-written " +"Digit Recognition tutorial " +"`_." 
+msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:38 +msgid "" +"In a file called :code:`client.py`, import Flower and MXNet related " +"packages:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:53 +msgid "In addition, define the device allocation in MXNet with:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:59 +msgid "" +"We use MXNet to load MNIST, a popular image classification dataset of " +"handwritten digits for machine learning. The MXNet utility " +":code:`mx.test_utils.get_mnist()` downloads the training and test data." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:73 +msgid "" +"Define the training and loss with MXNet. We train the model by looping " +"over the dataset, measure the corresponding loss, and optimize it." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:111 +msgid "" +"Next, we define the validation of our machine learning model. We loop " +"over the test set and measure both loss and accuracy on the test set." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:135 +msgid "" +"After defining the training and testing of a MXNet machine learning " +"model, we use these functions to implement a Flower client." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:137 +msgid "Our Flower clients will use a simple :code:`Sequential` model:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:156 +msgid "" +"After loading the dataset with :code:`load_data()` we perform one forward" +" propagation to initialize the model and model parameters with " +":code:`model(init)`. Next, we implement a Flower client." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:158 +#: ../../source/tutorial-quickstart-pytorch.rst:144 +#: ../../source/tutorial-quickstart-tensorflow.rst:54 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. 
When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to train the neural network we defined earlier)." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:164 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses MXNet. Implementing :code:`NumPyClient` usually means " +"defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:170 +#: ../../source/tutorial-quickstart-pytorch.rst:156 +#: ../../source/tutorial-quickstart-scikitlearn.rst:109 +msgid "return the model weight as a list of NumPy ndarrays" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:171 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-scikitlearn.rst:111 +msgid ":code:`set_parameters` (optional)" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:172 +#: ../../source/tutorial-quickstart-pytorch.rst:158 +#: ../../source/tutorial-quickstart-scikitlearn.rst:111 +msgid "" +"update the local model weights with the parameters received from the " +"server" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:174 +#: ../../source/tutorial-quickstart-pytorch.rst:160 +#: ../../source/tutorial-quickstart-scikitlearn.rst:114 +msgid "set the local model weights" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:175 +#: ../../source/tutorial-quickstart-pytorch.rst:161 +#: ../../source/tutorial-quickstart-scikitlearn.rst:115 +msgid "train the local model" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:176 +#: ../../source/tutorial-quickstart-pytorch.rst:162 +#: ../../source/tutorial-quickstart-scikitlearn.rst:116 +msgid "receive the updated local model 
weights" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:178 +#: ../../source/tutorial-quickstart-pytorch.rst:164 +#: ../../source/tutorial-quickstart-scikitlearn.rst:118 +msgid "test the local model" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:180 +msgid "They can be implemented in the following way:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:210 +msgid "" +"We can now create an instance of our class :code:`MNISTClient` and add " +"one line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:217 +#: ../../source/tutorial-quickstart-scikitlearn.rst:150 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()` or " +":code:`fl.client.start_numpy_client()`. The string " +":code:`\"0.0.0.0:8080\"` tells the client which server to connect to. In " +"our case we can run the server and the client on the same machine, " +"therefore we use :code:`\"0.0.0.0:8080\"`. If we run a truly federated " +"workload with the server and clients running on different machines, all " +"that needs to change is the :code:`server_address` we pass to the client." +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:239 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We therefore have to start the server first:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:247 +#: ../../source/tutorial-quickstart-pytorch.rst:226 +#: ../../source/tutorial-quickstart-scikitlearn.rst:224 +#: ../../source/tutorial-quickstart-tensorflow.rst:122 +#: ../../source/tutorial-quickstart-xgboost.rst:530 +msgid "" +"Once the server is running we can start the clients in different " +"terminals. 
Open a new terminal and start the first client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:254 +#: ../../source/tutorial-quickstart-pytorch.rst:233 +#: ../../source/tutorial-quickstart-scikitlearn.rst:231 +#: ../../source/tutorial-quickstart-tensorflow.rst:129 +#: ../../source/tutorial-quickstart-xgboost.rst:537 +msgid "Open another terminal and start the second client:" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:260 +#: ../../source/tutorial-quickstart-pytorch.rst:239 +#: ../../source/tutorial-quickstart-scikitlearn.rst:237 +#: ../../source/tutorial-quickstart-xgboost.rst:543 +msgid "" +"Each client will have its own dataset. You should now see how the " +"training does in the very first terminal (the one that started the " +"server):" +msgstr "" + +#: ../../source/tutorial-quickstart-mxnet.rst:292 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples" +"/quickstart-mxnet`." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with Pandas to perform Federated Analytics." +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:5 +msgid "Quickstart Pandas" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:10 +msgid "Let's build a federated analytics system using Pandas and Flower!" +msgstr "" + +#: ../../source/tutorial-quickstart-pandas.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ " +"to learn more." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch to train a CNN model on MNIST." 
+msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:13 +msgid "" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR10 using Flower and PyTorch." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:15 +#: ../../source/tutorial-quickstart-xgboost.rst:36 +msgid "" +"First of all, it is recommended to create a virtual environment and run " +"everything within a `virtualenv `_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:29 +msgid "" +"Since we want to use PyTorch to solve a computer vision task, let's go " +"ahead and install PyTorch and the **torchvision** library:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:39 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. Our training " +"procedure and network architecture are based on PyTorch's `Deep Learning " +"with PyTorch " +"`_." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:41 +msgid "" +"In a file called :code:`client.py`, import Flower and PyTorch related " +"packages:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:56 +msgid "In addition, we define the device allocation in PyTorch with:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:62 +msgid "" +"We use PyTorch to load CIFAR10, a popular colored image classification " +"dataset for machine learning. The PyTorch :code:`DataLoader()` downloads " +"the training and test data that are then normalized." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:78 +msgid "" +"Define the loss and optimizer with PyTorch. The training of the dataset " +"is done by looping over the dataset, measure the corresponding loss and " +"optimize it." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:94 +msgid "" +"Define then the validation of the machine learning network. 
We loop over" +" the test set and measure the loss and accuracy of the test set." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:113 +msgid "" +"After defining the training and testing of a PyTorch machine learning " +"model, we use the functions for the Flower clients." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:115 +msgid "" +"The Flower clients will use a simple CNN adapted from 'PyTorch: A 60 " +"Minute Blitz':" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:142 +msgid "" +"After loading the data set with :code:`load_data()` we define the Flower " +"interface." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:150 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses PyTorch. Implementing :code:`NumPyClient` usually means " +"defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:166 +msgid "which can be implemented in the following way:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:189 +#: ../../source/tutorial-quickstart-tensorflow.rst:82 +msgid "" +"We can now create an instance of our class :code:`CifarClient` and add " +"one line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:196 +#: ../../source/tutorial-quickstart-tensorflow.rst:90 +msgid "" +"That's it for the client. We only have to implement :code:`Client` or " +":code:`NumPyClient` and call :code:`fl.client.start_client()` or " +":code:`fl.client.start_numpy_client()`. The string :code:`\"[::]:8080\"` " +"tells the client which server to connect to. In our case we can run the " +"server and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. 
If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch.rst:271 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples" +"/quickstart-pytorch`." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with PyTorch Lightning to train an Auto Encoder model on MNIST." +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +msgid "" +"Let's build a horizontal federated learning system using PyTorch " +"Lightning and Flower!" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +msgid "" +"Please refer to the `full code example " +"`_ to learn more." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with scikit-learn to train a linear regression model." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:5 +msgid "Quickstart scikit-learn" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:10 +msgid "" +"In this tutorial, we will learn how to train a :code:`Logistic " +"Regression` model on MNIST using Flower and scikit-learn." 
+msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:26 +msgid "Since we want to use scikt-learn, let's go ahead and install it:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:32 +msgid "Or simply install all dependencies using Poetry:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:42 +msgid "" +"Now that we have all our dependencies installed, let's run a simple " +"distributed training with two clients and one server. However, before " +"setting up the client and server, we will define all functionalities that" +" we need for our federated learning setup within :code:`utils.py`. The " +":code:`utils.py` contains different functions defining all the machine " +"learning basics:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:45 +msgid ":code:`get_model_parameters()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:46 +msgid "Returns the paramters of a :code:`sklearn` LogisticRegression model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:47 +msgid ":code:`set_model_params()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:48 +msgid "Sets the parameters of a :code:`sklean` LogisticRegression model" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:49 +msgid ":code:`set_initial_params()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 +msgid "Initializes the model parameters that the Flower server will ask for" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:51 +msgid ":code:`load_mnist()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:52 +msgid "Loads the MNIST dataset using OpenML" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:53 +msgid ":code:`shuffle()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:54 +msgid "Shuffles data and its label" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid 
":code:`partition()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:56 +msgid "Splits datasets into a number of partitions" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:58 +msgid "" +"Please check out :code:`utils.py` `here " +"`_ for more details. The pre-defined functions are used in" +" the :code:`client.py` and imported. The :code:`client.py` also requires " +"to import several packages such as Flower and scikit-learn:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:73 +msgid "" +"We load the MNIST dataset from `OpenML `_, " +"a popular image classification dataset of handwritten digits for machine " +"learning. The utility :code:`utils.load_mnist()` downloads the training " +"and test data. The training set is split afterwards into 10 partitions " +"with :code:`utils.partition()`." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:85 +msgid "" +"Next, the logistic regression model is defined and initialized with " +":code:`utils.set_initial_params()`." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:97 +msgid "" +"The Flower server interacts with clients through an interface called " +":code:`Client`. When the server selects a particular client for training," +" it sends training instructions over the network. The client receives " +"those instructions and calls one of the :code:`Client` methods to run " +"your code (i.e., to fit the logistic regression we defined earlier)." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:103 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses scikit-learn. 
Implementing :code:`NumPyClient` usually " +"means defining the following methods (:code:`set_parameters` is optional " +"though):" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:112 +msgid "is directly imported with :code:`utils.set_model_params()`" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:120 +msgid "The methods can be implemented in the following way:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:143 +msgid "" +"We can now create an instance of our class :code:`MnistClient` and add " +"one line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:159 +msgid "" +"The following Flower server is a little bit more advanced and returns an " +"evaluation function for the server-side evaluation. First, we import " +"again all required libraries such as Flower and scikit-learn." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:162 +msgid ":code:`server.py`, import Flower and start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:173 +msgid "" +"The number of federated learning rounds is set in :code:`fit_round()` and" +" the evaluation is defined in :code:`get_evaluate_fn()`. The evaluation " +"function is called after each federated learning round and gives you " +"information about loss and accuracy." +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:198 +msgid "" +"The :code:`main` contains the server-side parameter initialization " +":code:`utils.set_initial_params()` as well as the aggregation strategy " +":code:`fl.server.strategy:FedAvg()`. The strategy is the default one, " +"federated averaging (or FedAvg), with two clients and evaluation after " +"each federated learning round. The server can be started with the command" +" :code:`fl.server.start_server(server_address=\"0.0.0.0:8080\", " +"strategy=strategy, config=fl.server.ServerConfig(num_rounds=3))`." 
+msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:217 +msgid "" +"With both client and server ready, we can now run everything and see " +"federated learning in action. Federated learning systems usually have a " +"server and multiple clients. We, therefore, have to start the server " +"first:" +msgstr "" + +#: ../../source/tutorial-quickstart-scikitlearn.rst:271 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this example can be found in :code:`examples/sklearn-logreg-" +"mnist`." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:5 +msgid "Quickstart TensorFlow" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:13 +msgid "Let's build a federated learning system in less than 20 lines of code!" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:15 +msgid "Before Flower can be imported we have to install it:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:21 +msgid "" +"Since we want to use the Keras API of TensorFlow (TF), we have to install" +" TF as well:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:31 +msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:38 +msgid "" +"We use the Keras utilities of TF to load CIFAR10, a popular colored image" +" classification dataset for machine learning. The call to " +":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " +"it locally, and then returns the entire training and test set as NumPy " +"ndarrays." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:47 +msgid "" +"Next, we need a model. 
For the purpose of this tutorial, we use " +"MobilNetV2 with 10 output classes:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:60 +msgid "" +"Flower provides a convenience class called :code:`NumPyClient` which " +"makes it easier to implement the :code:`Client` interface when your " +"workload uses Keras. The :code:`NumPyClient` interface defines three " +"methods which can be implemented in the following way:" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:135 +msgid "Each client will have its own dataset." +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:137 +msgid "" +"You should now see how the training does in the very first terminal (the " +"one that started the server):" +msgstr "" + +#: ../../source/tutorial-quickstart-tensorflow.rst:169 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"learning system. The full `source code " +"`_ for this can be found in :code:`examples" +"/quickstart-tensorflow/client.py`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:-1 +msgid "" +"Check out this Federated Learning quickstart tutorial for using Flower " +"with XGBoost to train classification models on trees." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:5 +msgid "Quickstart XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:11 +msgid "Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:13 +msgid "" +"EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient " +"implementation of gradient-boosted decision tree (**GBDT**), that " +"maximises the computational boundaries for boosted tree methods. It's " +"primarily designed to enhance both the performance and computational " +"speed of machine learning models. In XGBoost, trees are constructed " +"concurrently, unlike the sequential approach taken by GBDT." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:17 +msgid "" +"Often, for tabular data on medium-sized datasets with fewer than 10k " +"training examples, XGBoost surpasses the results of deep learning " +"techniques." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:20 +msgid "Why federated XGBoost?" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:22 +msgid "" +"Indeed, as the demand for data privacy and decentralized learning grows, " +"there's an increasing requirement to implement federated XGBoost systems " +"for specialised applications, like survival analysis and financial fraud " +"detection." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:24 +msgid "" +"Federated learning ensures that raw data remains on the local device, " +"making it an attractive approach for sensitive domains where data " +"security and privacy are paramount. Given the robustness and efficiency " +"of XGBoost, combining it with federated learning offers a promising " +"solution for these specific challenges." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:27 +msgid "" +"In this tutorial we will learn how to train a federated XGBoost model on " +"HIGGS dataset using Flower and :code:`xgboost` package. We use a simple " +"example (`full code xgboost-quickstart " +"`_)" +" with two *clients* and one *server* to demonstrate how federated XGBoost" +" works, and then we dive into a more complex example (`full code xgboost-" +"comprehensive `_) to run various experiments." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:34 +msgid "Environment Setup" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:38 +msgid "" +"We first need to install Flower and Flower Datasets. 
You can do this by " +"running :" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:44 +msgid "" +"Since we want to use :code:`xgboost` package to build up XGBoost trees, " +"let's go ahead and install :code:`xgboost`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:54 +msgid "" +"*Clients* are responsible for generating individual weight-updates for " +"the model based on their local datasets. Now that we have all our " +"dependencies installed, let's run a simple distributed training with two " +"clients and one server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:57 +msgid "" +"In a file called :code:`client.py`, import xgboost, Flower, Flower " +"Datasets and other related functions:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:84 +msgid "Dataset partition and hyper-parameter selection" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:86 +msgid "" +"Prior to local training, we require loading the HIGGS dataset from Flower" +" Datasets and conduct data partitioning for FL:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:99 +msgid "" +"In this example, we split the dataset into two partitions with uniform " +"distribution (:code:`IidPartitioner(num_partitions=2)`). Then, we load " +"the partition for the given client based on :code:`node_id`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:118 +msgid "" +"After that, we do train/test splitting on the given partition (client's " +"local data), and transform data format for :code:`xgboost` package." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:131 +msgid "" +"The functions of :code:`train_test_split` and " +":code:`transform_dataset_to_dmatrix` are defined as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:155 +msgid "Finally, we define the hyper-parameters used for XGBoost training." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:171 +msgid "" +"The :code:`num_local_round` represents the number of iterations for local" +" tree boost. We use CPU for the training in default. One can shift it to " +"GPU by setting :code:`tree_method` to :code:`gpu_hist`. We use AUC as " +"evaluation metric." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:178 +msgid "Flower client definition for XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:180 +msgid "" +"After loading the dataset we define the Flower client. We follow the " +"general rule to define :code:`XgbClient` class inherited from " +":code:`fl.client.Client`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:190 +msgid "" +"The :code:`self.bst` is used to keep the Booster objects that remain " +"consistent across rounds, allowing them to store predictions from trees " +"integrated in earlier rounds and maintain other essential data structures" +" for training." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:193 +msgid "" +"Then, we override :code:`get_parameters`, :code:`fit` and " +":code:`evaluate` methods insides :code:`XgbClient` class as follows." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:207 +msgid "" +"Unlike neural network training, XGBoost trees are not started from a " +"specified random weights. In this case, we do not use " +":code:`get_parameters` and :code:`set_parameters` to initialise model " +"parameters for XGBoost. As a result, let's return an empty tensor in " +":code:`get_parameters` when it is called by the server at the first " +"round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:248 +msgid "" +"In :code:`fit`, at the first round, we call :code:`xgb.train()` to build " +"up the first set of trees. the returned Booster object and config are " +"stored in :code:`self.bst` and :code:`self.config`, respectively. 
From " +"the second round, we load the global model sent from server to " +":code:`self.bst`, and then update model weights on local training data " +"with function :code:`local_boost` as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:266 +msgid "" +"Given :code:`num_local_round`, we update trees by calling " +":code:`self.bst.update` method. After training, the last " +":code:`N=num_local_round` trees will be extracted to send to the server." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:288 +msgid "" +"In :code:`evaluate`, we call :code:`self.bst.eval_set` function to " +"conduct evaluation on valid set. The AUC value will be returned." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:291 +msgid "" +"Now, we can create an instance of our class :code:`XgbClient` and add one" +" line to actually run this client:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:297 +msgid "" +"That's it for the client. We only have to implement :code:`Client`and " +"call :code:`fl.client.start_client()`. The string :code:`\"[::]:8080\"` " +"tells the client which server to connect to. In our case we can run the " +"server and the client on the same machine, therefore we use " +":code:`\"[::]:8080\"`. If we run a truly federated workload with the " +"server and clients running on different machines, all that needs to " +"change is the :code:`server_address` we point the client at." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:308 +msgid "" +"These updates are then sent to the *server* which will aggregate them to " +"produce a better model. Finally, the *server* sends this improved version" +" of the model back to each *client* to finish a complete FL round." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:311 +msgid "" +"In a file named :code:`server.py`, import Flower and FedXgbBagging from " +":code:`flwr.server.strategy`." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:313 +msgid "We first define a strategy for XGBoost bagging aggregation." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:336 +msgid "" +"We use two clients for this example. An " +":code:`evaluate_metrics_aggregation` function is defined to collect and " +"wighted average the AUC values from clients." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:339 +msgid "Then, we start the server:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:351 +msgid "Tree-based bagging aggregation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:353 +msgid "" +"You must be curious about how bagging aggregation works. Let's look into " +"the details." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:355 +msgid "" +"In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define " +":code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`." +" Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` " +"and :code:`evaluate` methods as follows:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:451 +msgid "" +"In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost " +"trees by calling :code:`aggregate()` function:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:510 +msgid "" +"In this function, we first fetch the number of trees and the number of " +"parallel trees for the current and previous model by calling " +":code:`_get_tree_nums`. Then, the fetched information will be aggregated." +" After that, the trees (containing model weights) are aggregated to " +"generate a new tree model." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:515 +msgid "" +"After traversal of all clients' models, a new global model is generated, " +"followed by the serialisation, and sending back to each client." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:520 +msgid "Launch Federated XGBoost!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:582 +msgid "" +"Congratulations! You've successfully built and run your first federated " +"XGBoost system. The AUC values can be checked in " +":code:`metrics_distributed`. One can see that the average AUC increases " +"over FL rounds." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:587 +msgid "" +"The full `source code `_ for this example can be found in :code:`examples" +"/xgboost-quickstart`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:591 +msgid "Comprehensive Federated XGBoost" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:593 +msgid "" +"Now that you have known how federated XGBoost work with Flower, it's time" +" to run some more comprehensive experiments by customising the " +"experimental settings. In the xgboost-comprehensive example (`full code " +"`_), we provide more options to define various experimental" +" setups, including data partitioning and centralised/distributed " +"evaluation. Let's take a look!" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:599 +msgid "Customised data partitioning" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:601 +msgid "" +"In :code:`dataset.py`, we have a function :code:`instantiate_partitioner`" +" to instantiate the data partitioner based on the given " +":code:`num_partitions` and :code:`partitioner_type`. Currently, we " +"provide four supported partitioner type to simulate the uniformity/non-" +"uniformity in data quantity (uniform, linear, square, exponential)." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:632 +msgid "Customised centralised/distributed evaluation" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:634 +msgid "" +"To facilitate centralised evaluation, we define a function in " +":code:`server.py`:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:666 +msgid "" +"This function returns a evaluation function which instantiates a " +":code:`Booster` object and loads the global model weights to it. The " +"evaluation is conducted by calling :code:`eval_set()` method, and the " +"tested AUC value is reported." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:669 +msgid "" +"As for distributed evaluation on the clients, it's same as the quick-" +"start example by overriding the :code:`evaluate()` method insides the " +":code:`XgbClient` class in :code:`client.py`." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:673 +msgid "Arguments parser" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:675 +msgid "" +"In :code:`utils.py`, we define the arguments parsers for clients and " +"server, allowing users to specify different experimental settings. Let's " +"first see the sever side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:714 +msgid "" +"This allows user to specify the number of total clients / FL rounds / " +"participating clients / clients for evaluation, and evaluation fashion. " +"Note that with :code:`--centralised-eval`, the sever will do centralised " +"evaluation and all functionalities for client evaluation will be " +"disabled." +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:718 +msgid "Then, the argument parser on client side:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:760 +msgid "" +"This defines various options for client data partitioning. Besides, " +"clients also have a option to conduct evaluation on centralised test set " +"by setting :code:`--centralised-eval`." 
+msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:764 +msgid "Example commands" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:766 +msgid "" +"To run a centralised evaluated experiment on 5 clients with exponential " +"distribution for 50 rounds, we first start the server as below:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:773 +msgid "Then, on each client terminal, we start the clients:" +msgstr "" + +#: ../../source/tutorial-quickstart-xgboost.rst:779 +msgid "" +"The full `source code `_ for this comprehensive example can be found in" +" :code:`examples/xgboost-comprehensive`." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:9 +msgid "Build a strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:11 +msgid "" +"Welcome to the third part of the Flower federated learning tutorial. In " +"previous parts of this tutorial, we introduced federated learning with " +"PyTorch and Flower (`part 1 `__) and we learned how strategies " +"can be used to customize the execution on both the server and the clients" +" (`part 2 `__)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll continue to customize the federated learning " +"system we built previously by creating a custom version of FedAvg (again," +" using `Flower `__ and `PyTorch " +"`__)." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:15 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:16 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:15 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:15 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the Flower community on Slack to connect, ask questions, and get help: " +"`Join Slack `__ 🌼 We'd love to hear from " +"you in the ``#introductions`` channel! And if anything is unclear, head " +"over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:17 +msgid "Let's build a new ``Strategy`` from scratch!" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:29 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:29 +msgid "Preparation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:31 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:32 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:31 +msgid "" +"Before we begin with the actual code, let's make sure that we have " +"everything we need." 
+msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:43 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:44 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:43 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:43 +msgid "Installing dependencies" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:45 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:46 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:45 +msgid "First, we install the necessary packages:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:65 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:66 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:65 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:65 +msgid "" +"Now that we have all dependencies installed, we can import everything we " +"need for this tutorial:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:101 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:102 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:104 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:101 +msgid "" +"It is possible to switch to a runtime that has GPU acceleration enabled " +"(on Google Colab: ``Runtime > Change runtime type > Hardware acclerator: " +"GPU > Save``). Note, however, that Google Colab is not always able to " +"offer GPU acceleration. If you see an error related to GPU availability " +"in one of the following sections, consider switching back to CPU-based " +"execution by setting ``DEVICE = torch.device(\"cpu\")``. 
If the runtime " +"has GPU acceleration enabled, you should see the output ``Training on " +"cuda``, otherwise it'll say ``Training on cpu``." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:114 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:115 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:114 +msgid "Data loading" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:116 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:116 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``. We introduce a new parameter" +" ``num_clients`` which allows us to call ``load_datasets`` with different" +" numbers of clients." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:167 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:168 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:167 +msgid "Model training/evaluation" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:169 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:170 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:169 +msgid "" +"Let's continue with the usual model definition (including " +"``set_parameters`` and ``get_parameters``), training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:258 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:258 +msgid "Flower client" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:260 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:260 
+msgid "" +"To implement the Flower client, we (again) create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``. Here, we also pass the " +"``cid`` to the client and use it log additional details:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:308 +msgid "Let's test what we have so far before we continue:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:339 +msgid "Build a Strategy from scratch" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:341 +msgid "" +"Let’s overwrite the ``configure_fit`` method such that it passes a higher" +" learning rate (potentially also other hyperparameters) to the optimizer " +"of a fraction of the clients. We will keep the sampling of the clients as" +" it is in ``FedAvg`` and then change the configuration dictionary (one of" +" the ``FitIns`` attributes)." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:507 +msgid "" +"The only thing left is to use the newly created custom Strategy " +"``FedCustom`` when starting the experiment:" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:534 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:932 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:697 +msgid "Recap" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:536 +msgid "" +"In this notebook, we’ve seen how to implement a custom strategy. A custom" +" strategy enables granular control over client node configuration, result" +" aggregation, and more. To define a custom strategy, you only have to " +"overwrite the abstract methods of the (abstract) base class ``Strategy``." 
+" To make custom strategies even more powerful, you can pass custom " +"functions to the constructor of your new class (``__init__``) and then " +"call these functions whenever needed." +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:550 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:948 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:749 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:715 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:369 +msgid "" +"Before you continue, make sure to join the Flower community on Slack: " +"`Join Slack `__" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:552 +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:950 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:751 +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:717 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:371 +msgid "" +"There's a dedicated ``#questions`` channel if you need help, but we'd " +"also love to hear who you are in ``#introductions``!" +msgstr "" + +#: ../../source/tutorial-series-build-a-strategy-from-scratch-pytorch.ipynb:554 +msgid "" +"The `Flower Federated Learning Tutorial - Part 4 " +"`__ introduces ``Client``, the flexible API underlying " +"``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:9 +msgid "Customize the client" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:11 +msgid "" +"Welcome to the fourth part of the Flower federated learning tutorial. 
In " +"the previous parts of this tutorial, we introduced federated learning " +"with PyTorch and Flower (`part 1 `__), we learned how " +"strategies can be used to customize the execution on both the server and " +"the clients (`part 2 `__), and we built our own " +"custom strategy from scratch (`part 3 `__)." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:14 +msgid "" +"In this notebook, we revisit ``NumPyClient`` and introduce a new " +"baseclass for building clients, simply named ``Client``. In previous " +"parts of this tutorial, we've based our client on ``NumPyClient``, a " +"convenience class which makes it easy to work with machine learning " +"libraries that have good NumPy interoperability. With ``Client``, we gain" +" a lot of flexibility that we didn't have before, but we'll also have to " +"do a few things the we didn't have to do before." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:18 +msgid "" +"Let's go deeper and see what it takes to move from ``NumPyClient`` to " +"``Client``!" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:30 +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:29 +msgid "Step 0: Preparation" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:117 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap everything in their own ``DataLoader``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:259 +msgid "Step 1: Revisiting NumPyClient" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:261 +msgid "" +"So far, we've implemented our client by subclassing " +"``flwr.client.NumPyClient``. The three methods we implemented are " +"``get_parameters``, ``fit``, and ``evaluate``. 
Finally, we wrap the " +"creation of instances of this class in a function called ``client_fn``:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:309 +msgid "" +"We've seen this before, there's nothing new so far. The only *tiny* " +"difference compared to the previous notebook is naming, we've changed " +"``FlowerClient`` to ``FlowerNumPyClient`` and ``client_fn`` to " +"``numpyclient_fn``. Let's run it to see the output we get:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:339 +msgid "" +"This works as expected, two clients are training for three rounds of " +"federated learning." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:341 +msgid "" +"Let's dive a little bit deeper and discuss how Flower executes this " +"simulation. Whenever a client is selected to do some work, " +"``start_simulation`` calls the function ``numpyclient_fn`` to create an " +"instance of our ``FlowerNumPyClient`` (along with loading the model and " +"the data)." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:343 +msgid "" +"But here's the perhaps surprising part: Flower doesn't actually use the " +"``FlowerNumPyClient`` object directly. Instead, it wraps the object to " +"makes it look like a subclass of ``flwr.client.Client``, not " +"``flwr.client.NumPyClient``. In fact, the Flower core framework doesn't " +"know how to handle ``NumPyClient``'s, it only knows how to handle " +"``Client``'s. ``NumPyClient`` is just a convenience abstraction built on " +"top of ``Client``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:345 +msgid "" +"Instead of building on top of ``NumPyClient``, we can directly build on " +"top of ``Client``." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:357 +msgid "Step 2: Moving from ``NumPyClient`` to ``Client``" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:359 +msgid "" +"Let's try to do the same thing using ``Client`` instead of " +"``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:465 +msgid "" +"Before we discuss the code in more detail, let's try to run it! Gotta " +"make sure our new ``Client``-based client works, right?" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:490 +msgid "" +"That's it, we're now using ``Client``. It probably looks similar to what " +"we've done with ``NumPyClient``. So what's the difference?" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:492 +msgid "" +"First of all, it's more code. But why? The difference comes from the fact" +" that ``Client`` expects us to take care of parameter serialization and " +"deserialization. For Flower to be able to send parameters over the " +"network, it eventually needs to turn these parameters into ``bytes``. " +"Turning parameters (e.g., NumPy ``ndarray``'s) into raw bytes is called " +"serialization. Turning raw bytes into something more useful (like NumPy " +"``ndarray``'s) is called deserialization. Flower needs to do both: it " +"needs to serialize parameters on the server-side and send them to the " +"client, the client needs to deserialize them to use them for local " +"training, and then serialize the updated parameters again to send them " +"back to the server, which (finally!) deserializes them again in order to " +"aggregate them with the updates received from other clients." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:495 +msgid "" +"The only *real* difference between Client and NumPyClient is that " +"NumPyClient takes care of serialization and deserialization for you. 
It " +"can do so because it expects you to return parameters as NumPy ndarray's," +" and it knows how to handle these. This makes working with machine " +"learning libraries that have good NumPy support (most of them) a breeze." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:497 +msgid "" +"In terms of API, there's one major difference: all methods in Client take" +" exactly one argument (e.g., ``FitIns`` in ``Client.fit``) and return " +"exactly one value (e.g., ``FitRes`` in ``Client.fit``). The methods in " +"``NumPyClient`` on the other hand have multiple arguments (e.g., " +"``parameters`` and ``config`` in ``NumPyClient.fit``) and multiple return" +" values (e.g., ``parameters``, ``num_example``, and ``metrics`` in " +"``NumPyClient.fit``) if there are multiple things to handle. These " +"``*Ins`` and ``*Res`` objects in ``Client`` wrap all the individual " +"values you're used to from ``NumPyClient``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:510 +msgid "Step 3: Custom serialization" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:512 +msgid "" +"Here we will explore how to implement custom serialization with a simple " +"example." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:514 +msgid "" +"But first what is serialization? Serialization is just the process of " +"converting an object into raw bytes, and equally as important, " +"deserialization is the process of converting raw bytes back into an " +"object. This is very useful for network communication. Indeed, without " +"serialization, you could not just a Python object through the internet." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:516 +msgid "" +"Federated Learning relies heavily on internet communication for training " +"by sending Python objects back and forth between the clients and the " +"server. 
This means that serialization is an essential part of Federated " +"Learning." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:518 +msgid "" +"In the following section, we will write a basic example where instead of " +"sending a serialized version of our ``ndarray``\\ s containing our " +"parameters, we will first convert the ``ndarray`` into sparse matrices, " +"before sending them. This technique can be used to save bandwidth, as in " +"certain cases where the weights of a model are sparse (containing many 0 " +"entries), converting them to a sparse matrix can greatly improve their " +"bytesize." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:521 +msgid "Our custom serialization/deserialization functions" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:523 +msgid "" +"This is where the real serialization/deserialization will happen, " +"especially in ``ndarray_to_sparse_bytes`` for serialization and " +"``sparse_bytes_to_ndarray`` for deserialization." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:525 +msgid "" +"Note that we imported the ``scipy.sparse`` library in order to convert " +"our arrays." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:613 +msgid "Client-side" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:615 +msgid "" +"To be able to able to serialize our ``ndarray``\\ s into sparse " +"parameters, we will just have to call our custom functions in our " +"``flwr.client.Client``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:617 +msgid "" +"Indeed, in ``get_parameters`` we need to serialize the parameters we got " +"from our network using our custom ``ndarrays_to_sparse_parameters`` " +"defined above." 
+msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:619 +msgid "" +"In ``fit``, we first need to deserialize the parameters coming from the " +"server using our custom ``sparse_parameters_to_ndarrays`` and then we " +"need to serialize our local results with " +"``ndarrays_to_sparse_parameters``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:621 +msgid "" +"In ``evaluate``, we will only need to deserialize the global parameters " +"with our custom function." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:725 +msgid "Server-side" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:727 +msgid "" +"For this example, we will just use ``FedAvg`` as a strategy. To change " +"the serialization and deserialization here, we only need to reimplement " +"the ``evaluate`` and ``aggregate_fit`` functions of ``FedAvg``. The other" +" functions of the strategy will be inherited from the super class " +"``FedAvg``." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:729 +msgid "As you can see only one line as change in ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:735 +msgid "" +"And for ``aggregate_fit``, we will first deserialize every result we " +"received:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:744 +msgid "And then serialize the aggregated result:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:903 +msgid "We can now run our custom serialization example!" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:934 +msgid "" +"In this part of the tutorial, we've seen how we can build clients by " +"subclassing either ``NumPyClient`` or ``Client``. 
``NumPyClient`` is a " +"convenience abstraction that makes it easier to work with machine " +"learning libraries that have good NumPy interoperability. ``Client`` is a" +" more flexible abstraction that allows us to do things that are not " +"possible in ``NumPyClient``. In order to do so, it requires us to handle " +"parameter serialization and deserialization ourselves." +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:952 +msgid "" +"This is the final part of the Flower tutorial (for now!), " +"congratulations! You're now well equipped to understand the rest of the " +"documentation. There are many topics we didn't cover in the tutorial, we " +"recommend the following resources:" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:954 +msgid "`Read Flower Docs `__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:955 +msgid "" +"`Check out Flower Code Examples " +"`__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:956 +msgid "" +"`Use Flower Baselines for your research " +"`__" +msgstr "" + +#: ../../source/tutorial-series-customize-the-client-pytorch.ipynb:957 +msgid "" +"`Watch Flower Summit 2023 videos `__" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:9 +msgid "Get started with Flower" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:11 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:11 +msgid "Welcome to the Flower federated learning tutorial!" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll build a federated learning system using Flower " +"and PyTorch. In part 1, we use PyTorch for the model training pipeline " +"and data loading. In part 2, we continue to federate the PyTorch-based " +"pipeline using Flower." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:17 +msgid "Let's get stated!" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:31 +msgid "" +"Before we begin with any actual code, let's make sure that we have " +"everything we need." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:45 +msgid "" +"Next, we install the necessary packages for PyTorch (``torch`` and " +"``torchvision``) and Flower (``flwr``):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:117 +msgid "Loading the data" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:119 +msgid "" +"Federated learning can be applied to many different types of tasks across" +" different domains. In this tutorial, we introduce federated learning by " +"training a simple convolutional neural network (CNN) on the popular " +"CIFAR-10 dataset. CIFAR-10 can be used to train image classifiers that " +"distinguish between images from ten different classes:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:150 +msgid "" +"We simulate having multiple datasets from multiple organizations (also " +"called the \"cross-silo\" setting in federated learning) by splitting the" +" original CIFAR-10 dataset into multiple partitions. Each partition will " +"represent the data from a single organization. We're doing this purely " +"for experimentation purposes, in the real world there's no need for data " +"splitting because each organization already has their own data (so the " +"data is naturally partitioned)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:152 +msgid "" +"Each organization will act as a client in the federated learning system. 
" +"So having ten organizations participate in a federation means having ten " +"clients connected to the federated learning server:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:172 +msgid "" +"Let's now load the CIFAR-10 training and test set, partition them into " +"ten smaller datasets (each split into training and validation set), and " +"wrap the resulting partitions by creating a PyTorch ``DataLoader`` for " +"each of them:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:222 +msgid "" +"We now have a list of ten training sets and ten validation sets " +"(``trainloaders`` and ``valloaders``) representing the data of ten " +"different organizations. Each ``trainloader``/``valloader`` pair contains" +" 4500 training examples and 500 validation examples. There's also a " +"single ``testloader`` (we did not split the test set). Again, this is " +"only necessary for building research or educational systems, actual " +"federated learning systems have their data naturally distributed across " +"multiple partitions." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:225 +msgid "" +"Let's take a look at the first batch of images and labels in the first " +"training set (i.e., ``trainloaders[0]``) before we move on:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:264 +msgid "" +"The output above shows a random batch of images from the first " +"``trainloader`` in our list of ten ``trainloaders``. It also prints the " +"labels associated with each image (i.e., one of the ten possible labels " +"we've seen above). If you run the cell again, you should see another " +"batch of images." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:276 +msgid "Step 1: Centralized Training with PyTorch" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:287 +msgid "" +"Next, we're going to use PyTorch to define a simple convolutional neural " +"network. This introduction assumes basic familiarity with PyTorch, so it " +"doesn't cover the PyTorch-related aspects in full detail. If you want to " +"dive deeper into PyTorch, we recommend `DEEP LEARNING WITH PYTORCH: A 60 " +"MINUTE BLITZ " +"`__." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:299 +msgid "Defining the model" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:301 +msgid "" +"We use the simple CNN described in the `PyTorch tutorial " +"`__:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:338 +msgid "Let's continue with the usual training and test functions:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:398 +msgid "Training the model" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:400 +msgid "" +"We now have all the basic building blocks we need: a dataset, a model, a " +"training function, and a test function. Let's put them together to train " +"the model on the dataset of one of our organizations " +"(``trainloaders[0]``). This simulates the reality of most machine " +"learning projects today: each organization has their own data and trains " +"models only on this internal data:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:430 +msgid "" +"Training the simple CNN on our CIFAR-10 split for 5 epochs should result " +"in a test set accuracy of about 41%, which is not good, but at the same " +"time, it doesn't really matter for the purposes of this tutorial. 
The " +"intent was just to show a simplistic centralized training pipeline that " +"sets the stage for what comes next - federated learning!" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:442 +msgid "Step 2: Federated Learning with Flower" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:444 +msgid "" +"Step 1 demonstrated a simple centralized training pipeline. All data was " +"in one place (i.e., a single ``trainloader`` and a single ``valloader``)." +" Next, we'll simulate a situation where we have multiple datasets in " +"multiple organizations and where we train a model over these " +"organizations using federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:456 +msgid "Updating model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:458 +msgid "" +"In federated learning, the server sends the global model parameters to " +"the client, and the client updates the local model with the parameters " +"received from the server. It then trains the model on the local data " +"(which changes the model parameters locally) and sends the " +"updated/changed model parameters back to the server (or, alternatively, " +"it sends just the gradients back to the server, not the full model " +"parameters)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:460 +msgid "" +"We need two helper functions to update the local model with parameters " +"received from the server and to get the updated model parameters from the" +" local model: ``set_parameters`` and ``get_parameters``. The following " +"two functions do just that for the PyTorch model above." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:462 +msgid "" +"The details of how this works are not really important here (feel free to" +" consult the PyTorch documentation if you want to learn more). In " +"essence, we use ``state_dict`` to access PyTorch model parameter tensors." +" The parameter tensors are then converted to/from a list of NumPy " +"ndarray's (which Flower knows how to serialize/deserialize):" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:490 +msgid "Implementing a Flower client" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:492 +msgid "" +"With that out of the way, let's move on to the interesting part. " +"Federated learning systems consist of a server and multiple clients. In " +"Flower, we create clients by implementing subclasses of " +"``flwr.client.Client`` or ``flwr.client.NumPyClient``. We use " +"``NumPyClient`` in this tutorial because it is easier to implement and " +"requires us to write less boilerplate." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:494 +msgid "" +"To implement the Flower client, we create a subclass of " +"``flwr.client.NumPyClient`` and implement the three methods " +"``get_parameters``, ``fit``, and ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:496 +msgid "``get_parameters``: Return the current local model parameters" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:497 +msgid "" +"``fit``: Receive model parameters from the server, train the model " +"parameters on the local data, and return the (updated) model parameters " +"to the server" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:498 +msgid "" +"``evaluate``: Receive model parameters from the server, evaluate the " +"model parameters on the local data, and return the evaluation result to " +"the server" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:500 +msgid "" +"We mentioned that our clients will use the previously defined PyTorch " +"components for model training and evaluation. Let's see a simple Flower " +"client implementation that brings everything together:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:537 +msgid "" +"Our class ``FlowerClient`` defines how local training/evaluation will be " +"performed and allows Flower to call the local training/evaluation through" +" ``fit`` and ``evaluate``. Each instance of ``FlowerClient`` represents a" +" *single client* in our federated learning system. Federated learning " +"systems have multiple clients (otherwise, there's not much to federate), " +"so each client will be represented by its own instance of " +"``FlowerClient``. If we have, for example, three clients in our workload," +" then we'd have three instances of ``FlowerClient``. 
Flower calls " +"``FlowerClient.fit`` on the respective instance when the server selects a" +" particular client for training (and ``FlowerClient.evaluate`` for " +"evaluation)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:541 +msgid "Using the Virtual Client Engine" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:543 +msgid "" +"In this notebook, we want to simulate a federated learning system with 10" +" clients on a single machine. This means that the server and all 10 " +"clients will live on a single machine and share resources such as CPU, " +"GPU, and memory. Having 10 clients would mean having 10 instances of " +"``FlowerClient`` in memory. Doing this on a single machine can quickly " +"exhaust the available memory resources, even if only a subset of these " +"clients participates in a single round of federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:545 +msgid "" +"In addition to the regular capabilities where server and clients run on " +"multiple machines, Flower, therefore, provides special simulation " +"capabilities that create ``FlowerClient`` instances only when they are " +"actually necessary for training or evaluation. To enable the Flower " +"framework to create clients when necessary, we need to implement a " +"function called ``client_fn`` that creates a ``FlowerClient`` instance on" +" demand. Flower calls ``client_fn`` whenever it needs an instance of one " +"particular client to call ``fit`` or ``evaluate`` (those instances are " +"usually discarded after use, so they should not keep any local state). " +"Clients are identified by a client ID, or short ``cid``. 
The ``cid`` can " +"be used, for example, to load different local data partitions for " +"different clients, as can be seen below:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:580 +msgid "Starting the training" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:582 +msgid "" +"We now have the class ``FlowerClient`` which defines client-side " +"training/evaluation and ``client_fn`` which allows Flower to create " +"``FlowerClient`` instances whenever it needs to call ``fit`` or " +"``evaluate`` on one particular client. The last step is to start the " +"actual simulation using ``flwr.simulation.start_simulation``." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:584 +msgid "" +"The function ``start_simulation`` accepts a number of arguments, amongst " +"them the ``client_fn`` used to create ``FlowerClient`` instances, the " +"number of clients to simulate (``num_clients``), the number of federated " +"learning rounds (``num_rounds``), and the strategy. The strategy " +"encapsulates the federated learning approach/algorithm, for example, " +"*Federated Averaging* (FedAvg)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:586 +msgid "" +"Flower has a number of built-in strategies, but we can also use our own " +"strategy implementations to customize nearly all aspects of the federated" +" learning approach. For this example, we use the built-in ``FedAvg`` " +"implementation and customize it using a few basic parameters. The last " +"step is the actual call to ``start_simulation`` which - you guessed it - " +"starts the simulation:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:628 +msgid "Behind the scenes" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:630 +msgid "So how does this work? How does Flower execute this simulation?" 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:632 +#, python-format +msgid "" +"When we call ``start_simulation``, we tell Flower that there are 10 " +"clients (``num_clients=10``). Flower then goes ahead an asks the " +"``FedAvg`` strategy to select clients. ``FedAvg`` knows that it should " +"select 100% of the available clients (``fraction_fit=1.0``), so it goes " +"ahead and selects 10 random clients (i.e., 100% of 10)." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:634 +msgid "" +"Flower then asks the selected 10 clients to train the model. When the " +"server receives the model parameter updates from the clients, it hands " +"those updates over to the strategy (*FedAvg*) for aggregation. The " +"strategy aggregates those updates and returns the new global model, which" +" then gets used in the next round of federated learning." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:646 +msgid "Where's the accuracy?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:648 +msgid "" +"You may have noticed that all metrics except for ``losses_distributed`` " +"are empty. Where did the ``{\"accuracy\": float(accuracy)}`` go?" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:650 +msgid "" +"Flower can automatically aggregate losses returned by individual clients," +" but it cannot do the same for metrics in the generic metrics dictionary " +"(the one with the ``accuracy`` key). Metrics dictionaries can contain " +"very different kinds of metrics and even key/value pairs that are not " +"metrics at all, so the framework does not (and can not) know how to " +"handle these automatically." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:652 +msgid "" +"As users, we need to tell the framework how to handle/aggregate these " +"custom metrics, and we do so by passing metric aggregation functions to " +"the strategy. The strategy will then call these functions whenever it " +"receives fit or evaluate metrics from clients. The two possible functions" +" are ``fit_metrics_aggregation_fn`` and " +"``evaluate_metrics_aggregation_fn``." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:654 +msgid "" +"Let's create a simple weighted averaging function to aggregate the " +"``accuracy`` metric we return from ``evaluate``:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:680 +msgid "" +"The only thing left to do is to tell the strategy to call this function " +"whenever it receives evaluation metric dictionaries from the clients:" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:717 +msgid "" +"We now have a full system that performs federated training and federated " +"evaluation. It uses the ``weighted_average`` function to aggregate custom" +" evaluation metrics and calculates a single ``accuracy`` metric across " +"all clients on the server side." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:719 +msgid "" +"The other two categories of metrics (``losses_centralized`` and " +"``metrics_centralized``) are still empty because they only apply when " +"centralized evaluation is being used. Part two of the Flower tutorial " +"will cover centralized evaluation." 
+msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:731 +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:351 +msgid "Final remarks" +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:733 +msgid "" +"Congratulations, you just trained a convolutional neural network, " +"federated over 10 clients! With that, you understand the basics of " +"federated learning with Flower. The same approach you've seen can be used" +" with other machine learning frameworks (not just PyTorch) and tasks (not" +" just CIFAR-10 images classification), for example NLP with Hugging Face " +"Transformers or speech with SpeechBrain." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:735 +msgid "" +"In the next notebook, we're going to cover some more advanced concepts. " +"Want to customize your strategy? Initialize parameters on the server " +"side? Or evaluate the aggregated model on the server side? We'll cover " +"all this and more in the next tutorial." +msgstr "" + +#: ../../source/tutorial-series-get-started-with-flower-pytorch.ipynb:753 +msgid "" +"The `Flower Federated Learning Tutorial - Part 2 " +"`__ goes into more depth about strategies and all " +"the advanced things you can build with them." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:9 +msgid "Use a federated learning strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:11 +msgid "" +"Welcome to the next part of the federated learning tutorial. In previous " +"parts of this tutorial, we introduced federated learning with PyTorch and" +" Flower (`part 1 `__)." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:13 +msgid "" +"In this notebook, we'll begin to customize the federated learning system " +"we built in the introductory notebook (again, using `Flower " +"`__ and `PyTorch `__)." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:17 +msgid "Let's move beyond FedAvg with Flower strategies!" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:309 +msgid "Strategy customization" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:311 +msgid "" +"So far, everything should look familiar if you've worked through the " +"introductory notebook. With that, we're ready to introduce a number of " +"new features." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:323 +msgid "Server-side parameter **initialization**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:325 +msgid "" +"Flower, by default, initializes the global model by asking one random " +"client for the initial parameters. In many cases, we want more control " +"over parameter initialization though. Flower therefore allows you to " +"directly pass the initial parameters to the Strategy:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:370 +msgid "" +"Passing ``initial_parameters`` to the ``FedAvg`` strategy prevents Flower" +" from asking one of the clients for the initial parameters. If we look " +"closely, we can see that the logs do not show any calls to the " +"``FlowerClient.get_parameters`` method." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:382 +msgid "Starting with a customized strategy" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:384 +msgid "" +"We've seen the function ``start_simulation`` before. It accepts a number " +"of arguments, amongst them the ``client_fn`` used to create " +"``FlowerClient`` instances, the number of clients to simulate " +"``num_clients``, the number of rounds ``num_rounds``, and the strategy." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:386 +msgid "" +"The strategy encapsulates the federated learning approach/algorithm, for " +"example, ``FedAvg`` or ``FedAdagrad``. Let's try to use a different " +"strategy this time:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:424 +msgid "Server-side parameter **evaluation**" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:426 +msgid "" +"Flower can evaluate the aggregated model on the server-side or on the " +"client-side. Client-side and server-side evaluation are similar in some " +"ways, but different in others." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:428 +msgid "" +"**Centralized Evaluation** (or *server-side evaluation*) is conceptually " +"simple: it works the same way that evaluation in centralized machine " +"learning does. If there is a server-side dataset that can be used for " +"evaluation purposes, then that's great. We can evaluate the newly " +"aggregated model after each round of training without having to send the " +"model to clients. We're also fortunate in the sense that our entire " +"evaluation dataset is available at all times." 
+msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:430 +msgid "" +"**Federated Evaluation** (or *client-side evaluation*) is more complex, " +"but also more powerful: it doesn't require a centralized dataset and " +"allows us to evaluate models over a larger set of data, which often " +"yields more realistic evaluation results. In fact, many scenarios require" +" us to use **Federated Evaluation** if we want to get representative " +"evaluation results at all. But this power comes at a cost: once we start " +"to evaluate on the client side, we should be aware that our evaluation " +"dataset can change over consecutive rounds of learning if those clients " +"are not always available. Moreover, the dataset held by each client can " +"also change over consecutive rounds. This can lead to evaluation results " +"that are not stable, so even if we would not change the model, we'd see " +"our evaluation results fluctuate over consecutive rounds." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:433 +msgid "" +"We've seen how federated evaluation works on the client side (i.e., by " +"implementing the ``evaluate`` method in ``FlowerClient``). Now let's see " +"how we can evaluate aggregated model parameters on the server-side:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:490 +msgid "Sending/receiving arbitrary values to/from clients" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:492 +msgid "" +"In some situations, we want to configure client-side execution (training," +" evaluation) from the server-side. One example for that is the server " +"asking the clients to train for a certain number of local epochs. Flower " +"provides a way to send configuration values from the server to the " +"clients using a dictionary. 
Let's look at an example where the clients " +"receive values from the server through the ``config`` parameter in " +"``fit`` (``config`` is also available in ``evaluate``). The ``fit`` " +"method receives the configuration dictionary through the ``config`` " +"parameter and can then read values from this dictionary. In this example," +" it reads ``server_round`` and ``local_epochs`` and uses those values to " +"improve the logging and configure the number of local training epochs:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:546 +msgid "" +"So how can we send this config dictionary from server to clients? The " +"built-in Flower Strategies provide way to do this, and it works similarly" +" to the way server-side evaluation works. We provide a function to the " +"strategy, and the strategy calls this function for every round of " +"federated learning:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:576 +msgid "" +"Next, we'll just pass this function to the FedAvg strategy before " +"starting the simulation:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:613 +msgid "" +"As we can see, the client logs now include the current round of federated" +" learning (which they read from the ``config`` dictionary). We can also " +"configure local training to run for one epoch during the first and second" +" round of federated learning, and then for two epochs during the third " +"round." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:615 +msgid "" +"Clients can also return arbitrary values to the server. To do so, they " +"return a dictionary from ``fit`` and/or ``evaluate``. 
We have seen and " +"used this concept throughout this notebook without mentioning it " +"explicitly: our ``FlowerClient`` returns a dictionary containing a custom" +" key/value pair as the third return value in ``evaluate``." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:627 +msgid "Scaling federated learning" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:629 +msgid "" +"As a last step in this notebook, let's see how we can use Flower to " +"experiment with a large number of clients." +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:651 +#, python-format +msgid "" +"We now have 1000 partitions, each holding 45 training and 5 validation " +"examples. Given that the number of training examples on each client is " +"quite small, we should probably train the model a bit longer, so we " +"configure the clients to perform 3 local training epochs. We should also " +"adjust the fraction of clients selected for training during each round " +"(we don't want all 1000 clients participating in every round), so we " +"adjust ``fraction_fit`` to ``0.05``, which means that only 5% of " +"available clients (so 50 clients) will be selected for training each " +"round:" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:699 +msgid "" +"In this notebook, we've seen how we can gradually enhance our system by " +"customizing the strategy, initializing parameters on the server side, " +"choosing a different strategy, and evaluating models on the server-side. " +"That's quite a bit of flexibility with so little code, right?" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:701 +msgid "" +"In the later sections, we've seen how we can communicate arbitrary values" +" between server and clients to fully customize client-side execution. 
" +"With that capability, we built a large-scale Federated Learning " +"simulation using the Flower Virtual Client Engine and ran an experiment " +"involving 1000 clients in the same workload - all in a Jupyter Notebook!" +msgstr "" + +#: ../../source/tutorial-series-use-a-federated-learning-strategy-pytorch.ipynb:719 +msgid "" +"The `Flower Federated Learning Tutorial - Part 3 " +"`__ shows how to build a fully custom ``Strategy`` " +"from scratch." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:9 +msgid "What is Federated Learning?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:13 +msgid "" +"In this tutorial, you will learn what federated learning is, build your " +"first system in Flower, and gradually extend it. If you work through all " +"parts of the tutorial, you will be able to build advanced federated " +"learning systems that approach the current state of the art in the field." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:15 +msgid "" +"🧑‍🏫 This tutorial starts at zero and expects no familiarity with " +"federated learning. Only a basic understanding of data science and Python" +" programming is assumed." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:17 +msgid "" +"`Star Flower on GitHub `__ ⭐️ and join " +"the open-source Flower community on Slack to connect, ask questions, and " +"get help: `Join Slack `__ 🌼 We'd love to " +"hear from you in the ``#introductions`` channel! And if anything is " +"unclear, head over to the ``#questions`` channel." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:19 +msgid "Let's get started!" 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:31 +msgid "Classic machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:33 +msgid "" +"Before we begin to discuss federated learning, let us quickly recap how " +"most machine learning works today." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:35 +msgid "" +"In machine learning, we have a model, and we have data. The model could " +"be a neural network (as depicted here), or something else, like classical" +" linear regression." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 +msgid "|e1dd4b4129b040bea23a894266227080|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 +msgid "Model and data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:47 +msgid "" +"We train the model using the data to perform a useful task. A task could " +"be to detect objects in images, transcribe an audio recording, or play a " +"game like Go." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 +msgid "|c0d4cc6a442948dca8da40d2440068d9|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 +msgid "Train model using data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:59 +msgid "" +"Now, in practice, the training data we work with doesn't originate on the" +" machine we train the model on. It gets created somewhere else." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:61 +msgid "" +"It originates on a smartphone by the user interacting with an app, a car " +"collecting sensor data, a laptop receiving input via the keyboard, or a " +"smart speaker listening to someone trying to sing a song." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 +msgid "|174e1e4fa1f149a19bfbc8bc1126f46a|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 +msgid "Data on a phone" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:73 +msgid "" +"What's also important to mention, this \"somewhere else\" is usually not " +"just one place, it's many places. It could be several devices all running" +" the same app. But it could also be several organizations, all generating" +" data for the same task." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 +msgid "|4e021a3dc08249d2a89daa3ab03c2714|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 +msgid "Data is on many devices" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:85 +msgid "" +"So to use machine learning, or any kind of data analysis, the approach " +"that has been used in the past was to collect all data on a central " +"server. This server can be somewhere in a data center, or somewhere in " +"the cloud." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 +msgid "|e74a1d5ce7eb49688651f2167a59065b|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 +msgid "Central data collection" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:97 +msgid "" +"Once all the data is collected in one place, we can finally use machine " +"learning algorithms to train our model on the data. This is the machine " +"learning approach that we've basically always relied on." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 +msgid "|eb29ec4c7aef4e93976795ed72df647e|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 +msgid "Central model training" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:130 +msgid "Challenges of classical machine learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:132 +msgid "" +"The classic machine learning approach we've just seen can be used in some" +" cases. Great examples include categorizing holiday photos, or analyzing " +"web traffic. Cases, where all the data is naturally available on a " +"centralized server." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 +msgid "|c2f699d8ac484f5081721a6f1511f70d|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 +msgid "Centralized possible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:144 +msgid "" +"But the approach can not be used in many other cases. Cases, where the " +"data is not available on a centralized server, or cases where the data " +"available on one server is not enough to train a good model." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 +msgid "|cf42accdacbf4e5eb4fa0503108ba7a7|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 +msgid "Centralized impossible" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:156 +msgid "" +"There are many reasons why the classic centralized machine learning " +"approach does not work for a large number of highly important real-world " +"use cases. 
Those reasons include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:158 +msgid "" +"**Regulations**: GDPR (Europe), CCPA (California), PIPEDA (Canada), LGPD " +"(Brazil), PDPL (Argentina), KVKK (Turkey), POPI (South Africa), FSS " +"(Russia), CDPR (China), PDPB (India), PIPA (Korea), APPI (Japan), PDP " +"(Indonesia), PDPA (Singapore), APP (Australia), and other regulations " +"protect sensitive data from being moved. In fact, those regulations " +"sometimes even prevent single organizations from combining their own " +"users' data for artificial intelligence training because those users live" +" in different parts of the world, and their data is governed by different" +" data protection regulations." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:160 +msgid "" +"**User preference**: In addition to regulation, there are use cases where" +" users just expect that no data leaves their device, ever. If you type " +"your passwords and credit card info into the digital keyboard of your " +"phone, you don't expect those passwords to end up on the server of the " +"company that developed that keyboard, do you? In fact, that use case was " +"the reason federated learning was invented in the first place." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:161 +msgid "" +"**Data volume**: Some sensors, like cameras, produce such a high data " +"volume that it is neither feasible nor economic to collect all the data " +"(due to, for example, bandwidth or communication efficiency). Think about" +" a national rail service with hundreds of train stations across the " +"country. If each of these train stations is outfitted with a number of " +"security cameras, the volume of raw on-device data they produce requires " +"incredibly powerful and exceedingly expensive infrastructure to process " +"and store. And most of the data isn't even useful." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:164 +msgid "Examples where centralized machine learning does not work include:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:166 +msgid "" +"Sensitive healthcare records from multiple hospitals to train cancer " +"detection models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:167 +msgid "" +"Financial information from different organizations to detect financial " +"fraud" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:168 +msgid "Location data from your electric car to make better range prediction" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:169 +msgid "End-to-end encrypted messages to train better auto-complete models" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:171 +msgid "" +"The popularity of privacy-enhancing systems like the `Brave " +"`__ browser or the `Signal `__ " +"messenger shows that users care about privacy. In fact, they choose the " +"privacy-enhancing version over other alternatives, if such an alernative " +"exists. But what can we do to apply machine learning and data science to " +"these cases to utilize private data? After all, these are all areas that " +"would benefit significantly from recent advances in AI." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:186 +msgid "Federated learning" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:188 +msgid "" +"Federated learning simply reverses this approach. It enables machine " +"learning on distributed data by moving the training to the data, instead " +"of moving the data to the training. 
Here's the single-sentence " +"explanation:" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:190 +msgid "Central machine learning: move the data to the computation" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:191 +msgid "Federated (machine) learning: move the computation to the data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:193 +msgid "" +"By doing so, it enables us to use machine learning (and other data " +"science approaches) in areas where it wasn't possible before. We can now " +"train excellent medical AI models by enabling different hospitals to work" +" together. We can solve financial fraud by training AI models on the data" +" of different financial institutions. We can build novel privacy-" +"enhancing applications (such as secure messaging) that have better built-" +"in AI than their non-privacy-enhancing alternatives. And those are just a" +" few of the examples that come to mind. As we deploy federated learning, " +"we discover more and more areas that can suddenly be reinvented because " +"they now have access to vast amounts of previously inaccessible data." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:196 +msgid "" +"So how does federated learning work, exactly? Let's start with an " +"intuitive explanation." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:199 +msgid "Federated learning in five steps" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:202 +msgid "Step 0: Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:204 +msgid "" +"We start by initializing the model on the server. This is exactly the " +"same in classic centralized learning: we initialize the model parameters," +" either randomly or from a previously saved checkpoint." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 +msgid "|5ec8356bc2564fa09178b1ceed5beccc|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 +msgid "Initialize global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:217 +msgid "" +"Step 1: Send model to a number of connected organizations/devices (client" +" nodes)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:219 +msgid "" +"Next, we send the parameters of the global model to the connected client " +"nodes (think: edge devices like smartphones or servers belonging to " +"organizations). This is to ensure that each participating node starts " +"their local training using the same model parameters. We often use only a" +" few of the connected nodes instead of all nodes. The reason for this is " +"that selecting more and more client nodes has diminishing returns." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 +msgid "|7c9329e97bd0430bad335ab605a897a7|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 +msgid "Send global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:232 +msgid "" +"Step 2: Train model locally on the data of each organization/device " +"(client node)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:234 +msgid "" +"Now that all (selected) client nodes have the latest version of the " +"global model parameters, they start the local training. They use their " +"own local dataset to train their own local model. They don't train the " +"model until full convergence, but they only train for a little while. " +"This could be as little as one epoch on the local data, or even just a " +"few steps (mini-batches)." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 +msgid "|88002bbce1094ba1a83c9151df18f707|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 +msgid "Train on local data" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:247 +msgid "Step 3: Return model updates back to the server" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:249 +msgid "" +"After local training, each client node has a slightly different version " +"of the model parameters they originally received. The parameters are all " +"different because each client node has different examples in its local " +"dataset. The client nodes then send those model updates back to the " +"server. The model updates they send can either be the full model " +"parameters or just the gradients that were accumulated during local " +"training." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 +msgid "|391766aee87c482c834c93f7c22225e2|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 +msgid "Send model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:262 +msgid "Step 4: Aggregate model updates into a new global model" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:264 +msgid "" +"The server receives model updates from the selected client nodes. If it " +"selected 100 client nodes, it now has 100 slightly different versions of " +"the original global model, each trained on the local data of one client. " +"But didn't we want to have one model that contains the learnings from the" +" data of all 100 client nodes?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:266 +msgid "" +"In order to get one single model, we have to combine all the model " +"updates we received from the client nodes. 
This process is called " +"*aggregation*, and there are many different ways to do it. The most basic" +" way to do it is called *Federated Averaging* (`McMahan et al., 2016 " +"`__), often abbreviated as *FedAvg*. " +"*FedAvg* takes the 100 model updates and, as the name suggests, averages " +"them. To be more precise, it takes the *weighted average* of the model " +"updates, weighted by the number of examples each client used for " +"training. The weighting is important to make sure that each data example " +"has the same \"influence\" on the resulting global model. If one client " +"has 10 examples, and another client has 100 examples, then - without " +"weighting - each of the 10 examples would influence the global model ten " +"times as much as each of the 100 examples." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 +msgid "|93b9a15bd27f4e91b40f642c253dfaac|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 +msgid "Aggregate model updates" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:280 +msgid "Step 5: Repeat steps 1 to 4 until the model converges" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:282 +msgid "" +"Steps 1 to 4 are what we call a single round of federated learning. The " +"global model parameters get sent to the participating client nodes (step " +"1), the client nodes train on their local data (step 2), they send their " +"updated models to the server (step 3), and the server then aggregates the" +" model updates to get a new version of the global model (step 4)." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:284 +msgid "" +"During a single round, each client node that participates in that " +"iteration only trains for a little while. 
This means that after the " +"aggregation step (step 4), we have a model that has been trained on all " +"the data of all participating client nodes, but only for a little while. " +"We then have to repeat this training process over and over again to " +"eventually arrive at a fully trained model that performs well across the " +"data of all client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:289 +msgid "" +"Congratulations, you now understand the basics of federated learning. " +"There's a lot more to discuss, of course, but that was federated learning" +" in a nutshell. In later parts of this tutorial, we will go into more " +"detail. Interesting questions include: How can we select the best client " +"nodes that should participate in the next round? What's the best way to " +"aggregate model updates? How can we handle failing client nodes " +"(stragglers)?" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:294 +msgid "" +"Just like we can train a model on the decentralized data of different " +"client nodes, we can also evaluate the model on that data to receive " +"valuable metrics. This is called federated evaluation, sometimes " +"abbreviated as FE. In fact, federated evaluation is an integral part of " +"most federated learning systems." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:297 +msgid "Federated analytics" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:299 +msgid "" +"In many cases, machine learning isn't necessary to derive value from " +"data. Data analysis can yield valuable insights, but again, there's often" +" not enough data to get a clear answer. What's the average age at which " +"people develop a certain type of health condition? Federated analytics " +"enables such queries over multiple client nodes. 
It is usually used in " +"conjunction with other privacy-enhancing technologies like secure " +"aggregation to prevent the server from seeing the results submitted by " +"individual client nodes." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:303 +msgid "Differential Privacy" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:305 +msgid "" +"Differential privacy (DP) is often mentioned in the context of Federated " +"Learning. It is a privacy-preserving method used when analyzing and " +"sharing statistical data, ensuring the privacy of individual " +"participants. DP achieves this by adding statistical noise to the model " +"updates, ensuring any individual participants’ information cannot be " +"distinguished or re-identified. This technique can be considered an " +"optimization that provides a quantifiable privacy protection measure." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:326 +msgid "Flower" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:328 +msgid "" +"Federated learning, federated evaluation, and federated analytics require" +" infrastructure to move machine learning models back and forth, train and" +" evaluate them on local data, and then aggregate the updated models. " +"Flower provides the infrastructure to do exactly that in an easy, " +"scalable, and secure way. In short, Flower presents a unified approach to" +" federated learning, analytics, and evaluation. It allows the user to " +"federate any workload, any ML framework, and any programming language." 
+msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 +msgid "|a23d9638f96342ef9d25209951e2d564|" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 +msgid "" +"Flower federated learning server and client nodes (car, scooter, personal" +" computer, roomba, and phone)" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:353 +msgid "" +"Congratulations, you just learned the basics of federated learning and " +"how it relates to the classic (centralized) machine learning!" +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:355 +msgid "" +"In the next part of this tutorial, we are going to build a first " +"federated learning system with Flower." +msgstr "" + +#: ../../source/tutorial-series-what-is-federated-learning.ipynb:373 +msgid "" +"The `Flower Federated Learning Tutorial - Part 1 " +"`__ shows how to build a simple federated learning system " +"with PyTorch and Flower." +msgstr "" + diff --git a/doc/locales/zh_Hans/LC_MESSAGES/sphinx.po b/doc/locales/zh_Hans/LC_MESSAGES/sphinx.po new file mode 100644 index 000000000000..98f300b872e3 --- /dev/null +++ b/doc/locales/zh_Hans/LC_MESSAGES/sphinx.po @@ -0,0 +1,37 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2022 Flower Labs GmbH +# This file is distributed under the same license as the Flower package. +# FIRST AUTHOR , 2023. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: Flower main\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2023-11-23 18:31+0100\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language: zh_Hans\n" +"Language-Team: zh_Hans \n" +"Plural-Forms: nplurals=1; plural=0;\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.13.1\n" + +#: ../../source/_templates/base.html:18 +msgid "About these documents" +msgstr "" + +#: ../../source/_templates/base.html:21 +msgid "Index" +msgstr "" + +#: ../../source/_templates/base.html:24 +msgid "Search" +msgstr "" + +#: ../../source/_templates/base.html:27 +msgid "Copyright" +msgstr "" + diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst index 8d6f78f3088a..111920d5602b 100644 --- a/doc/source/tutorial-quickstart-xgboost.rst +++ b/doc/source/tutorial-quickstart-xgboost.rst @@ -93,7 +93,7 @@ Prior to local training, we require loading the HIGGS dataset from Flower Datase fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) # Load the partition for this `node_id` - partition = fds.load_partition(idx=args.node_id, split="train") + partition = fds.load_partition(node_id=args.node_id, split="train") partition.set_format("numpy") In this example, we split the dataset into two partitions with uniform distribution (:code:`IidPartitioner(num_partitions=2)`). 
diff --git a/examples/doc/source/_static/.gitignore b/examples/doc/source/_static/.gitignore new file mode 100644 index 000000000000..c2412a5912cc --- /dev/null +++ b/examples/doc/source/_static/.gitignore @@ -0,0 +1,5 @@ +* +!custom.css +!favicon.ico +!flower-logo.png +!tmux_jtop_view.gif diff --git a/examples/doc/source/_static/diagram.png b/examples/doc/source/_static/diagram.png deleted file mode 100644 index 66d8855c859f..000000000000 Binary files a/examples/doc/source/_static/diagram.png and /dev/null differ diff --git a/examples/mt-pytorch-callable/README.md b/examples/mt-pytorch-callable/README.md new file mode 100644 index 000000000000..65ef000c26f2 --- /dev/null +++ b/examples/mt-pytorch-callable/README.md @@ -0,0 +1,49 @@ +# Deploy 🧪 + +🧪 = this page covers experimental features that might change in future versions of Flower + +This how-to guide describes the deployment of a long-running Flower server. + +## Preconditions + +Let's assume the following project structure: + +```bash +$ tree . +. 
+├── client.py
+├── driver.py
+└── requirements.txt
+```
+
+## Install dependencies
+
+```bash
+pip install -r requirements.txt
+```
+
+## Start the long-running Flower server
+
+```bash
+flower-server --insecure
+```
+
+## Start the long-running Flower client
+
+In a new terminal window, start the first long-running Flower client:
+
+```bash
+flower-client --callable client:flower
+```
+
+In yet another new terminal window, start the second long-running Flower client:
+
+```bash
+flower-client --callable client:flower
+```
+
+## Start the Driver script
+
+```bash
+python driver.py
+```
diff --git a/examples/mt-pytorch-callable/client.py b/examples/mt-pytorch-callable/client.py
new file mode 100644
index 000000000000..6f9747784ae0
--- /dev/null
+++ b/examples/mt-pytorch-callable/client.py
@@ -0,0 +1,123 @@
+import warnings
+from collections import OrderedDict
+
+import flwr as fl
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import DataLoader
+from torchvision.datasets import CIFAR10
+from torchvision.transforms import Compose, Normalize, ToTensor
+from tqdm import tqdm
+
+
+# #############################################################################
+# 1. 
Regular PyTorch pipeline: nn.Module, train, test, and DataLoader +# ############################################################################# + +warnings.filterwarnings("ignore", category=UserWarning) +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self) -> None: + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def train(net, trainloader, epochs): + """Train the model on the training set.""" + criterion = torch.nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + for _ in range(epochs): + for images, labels in tqdm(trainloader): + optimizer.zero_grad() + criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward() + optimizer.step() + + +def test(net, testloader): + """Validate the model on the test set.""" + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for images, labels in tqdm(testloader): + outputs = net(images.to(DEVICE)) + labels = labels.to(DEVICE) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + return loss, accuracy + + +def load_data(): + """Load CIFAR-10 (training and test set).""" + trf = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + trainset = CIFAR10("./data", train=True, download=True, transform=trf) + testset = CIFAR10("./data", train=False, download=True, 
# Define Flower client
class FlowerClient(fl.client.NumPyClient):
    """NumPyClient that trains/evaluates the module-level `net` on CIFAR-10."""

    def get_parameters(self, config):
        """Return the current model weights as a list of NumPy arrays."""
        state = net.state_dict()
        return [tensor.cpu().numpy() for tensor in state.values()]

    def set_parameters(self, parameters):
        """Overwrite the model weights with the given NumPy arrays."""
        keys = net.state_dict().keys()
        state_dict = OrderedDict(
            (key, torch.tensor(array)) for key, array in zip(keys, parameters)
        )
        net.load_state_dict(state_dict, strict=True)

    def fit(self, parameters, config):
        """Train locally for one epoch and return the updated weights."""
        self.set_parameters(parameters)
        train(net, trainloader, epochs=1)
        return self.get_parameters(config={}), len(trainloader.dataset), {}

    def evaluate(self, parameters, config):
        """Evaluate the received weights on the local test set."""
        self.set_parameters(parameters)
        loss, accuracy = test(net, testloader)
        return loss, len(testloader.dataset), {"accuracy": accuracy}
# Define metric aggregation function
def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:
    """Aggregate client accuracies into an example-weighted average."""
    total_examples = 0
    weighted_sum = 0.0
    for num_examples, client_metrics in metrics:
        # Weight each client's accuracy by the number of examples it used
        total_examples += num_examples
        weighted_sum += num_examples * client_metrics["accuracy"]
    return {"accuracy": weighted_sum / total_examples}
# Define metric aggregation function
def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:
    """Compute the example-weighted mean accuracy across clients."""
    # Each entry pairs a client's example count with its reported metrics.
    examples = [num_examples for num_examples, _ in metrics]
    weighted = [num_examples * m["accuracy"] for num_examples, m in metrics]
    # Aggregate and return custom metric (weighted average)
    return {"accuracy": sum(weighted) / sum(examples)}
However, it will help you understand how to adapt Flower to your use case. Running this example in itself is quite easy. This example uses [Flower Datasets](https://flower.dev/docs/datasets/) to download, partition and preprocess the CIFAR-10 dataset. ## Project Setup @@ -56,18 +55,20 @@ Afterwards you are ready to start the Flower server as well as the clients. You python3 server.py ``` -Now you are ready to start the Flower clients which will participate in the learning. To do so simply open two more terminal windows and run the following commands. +Now you are ready to start the Flower clients which will participate in the learning. We need to specify the node id to +use different partitions of the data on different nodes. To do so simply open two more terminal windows and run the +following commands. Start client 1 in the first terminal: ```shell -python3 client.py +python3 client.py --node-id 0 ``` Start client 2 in the second terminal: ```shell -python3 client.py +python3 client.py --node-id 1 ``` You will see that PyTorch is starting a federated training. Look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) for a detailed explanation. 
def load_data(node_id):
    """Load the `node_id`-th of three IID CIFAR-10 partitions.

    Returns a (trainloader, testloader) pair built from an 80/20 split of
    the node's partition.
    """
    fds = FederatedDataset(dataset="cifar10", partitioners={"train": 3})
    partition = fds.load_partition(node_id)
    # Divide data on each node: 80% train, 20% test
    split = partition.train_test_split(test_size=0.2)
    pytorch_transforms = Compose(
        [ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    )

    def apply_transforms(batch):
        """Apply torchvision transforms to every image in the batch."""
        batch["img"] = [pytorch_transforms(img) for img in batch["img"]]
        return batch

    split = split.with_transform(apply_transforms)
    return (
        DataLoader(split["train"], batch_size=32, shuffle=True),
        DataLoader(split["test"], batch_size=32),
    )
a/examples/quickstart-pytorch/requirements.txt b/examples/quickstart-pytorch/requirements.txt index 797ca6db6244..4e321e2cd0c2 100644 --- a/examples/quickstart-pytorch/requirements.txt +++ b/examples/quickstart-pytorch/requirements.txt @@ -1,4 +1,5 @@ flwr>=1.0, <2.0 -torch==1.13.1 -torchvision==0.14.1 +flwr-datasets[vision]>=0.0.2, <1.0.0 +torch==2.1.1 +torchvision==0.16.1 tqdm==4.65.0 diff --git a/examples/quickstart-pytorch/run.sh b/examples/quickstart-pytorch/run.sh index d2bf34f834b1..cdace99bb8df 100755 --- a/examples/quickstart-pytorch/run.sh +++ b/examples/quickstart-pytorch/run.sh @@ -2,16 +2,13 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ -# Download the CIFAR-10 dataset -python -c "from torchvision.datasets import CIFAR10; CIFAR10('./data', download=True)" - echo "Starting server" python server.py & sleep 3 # Sleep for 3s to give the server enough time to start -for i in `seq 0 1`; do +for i in $(seq 0 1); do echo "Starting client $i" - python client.py & + python client.py --node-id "$i" & done # Enable CTRL+C to stop all background processes diff --git a/examples/xgboost-comprehensive/client.py b/examples/xgboost-comprehensive/client.py index bc9735a2f657..a37edac32648 100644 --- a/examples/xgboost-comprehensive/client.py +++ b/examples/xgboost-comprehensive/client.py @@ -43,13 +43,15 @@ partitioner_type=partitioner_type, num_partitions=num_partitions ) fds = FederatedDataset( - dataset="jxie/higgs", partitioners={"train": partitioner}, resplitter=resplit + dataset="jxie/higgs", + partitioners={"train": partitioner}, + resplitter=resplit, ) # Load the partition for this `node_id` log(INFO, "Loading partition...") node_id = args.node_id -partition = fds.load_partition(idx=node_id, split="train") +partition = fds.load_partition(node_id=node_id, split="train") partition.set_format("numpy") if args.centralised_eval: diff --git a/examples/xgboost-comprehensive/pyproject.toml 
b/examples/xgboost-comprehensive/pyproject.toml index 5414b5122154..bbfbb4134b8d 100644 --- a/examples/xgboost-comprehensive/pyproject.toml +++ b/examples/xgboost-comprehensive/pyproject.toml @@ -10,6 +10,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" +flwr-nightly = ">=1.0,<2.0" flwr-datasets = ">=0.0.2,<1.0.0" xgboost = ">=2.0.0,<3.0.0" diff --git a/examples/xgboost-comprehensive/requirements.txt b/examples/xgboost-comprehensive/requirements.txt index c6b9c1a67894..c37ac2b6ad6d 100644 --- a/examples/xgboost-comprehensive/requirements.txt +++ b/examples/xgboost-comprehensive/requirements.txt @@ -1,3 +1,3 @@ -flwr>=1.0, <2.0 +flwr-nightly>=1.0, <2.0 flwr-datasets>=0.0.2, <1.0.0 xgboost>=2.0.0, <3.0.0 diff --git a/examples/xgboost-quickstart/client.py b/examples/xgboost-quickstart/client.py index e88580197128..b5eab59ba14d 100644 --- a/examples/xgboost-quickstart/client.py +++ b/examples/xgboost-quickstart/client.py @@ -63,7 +63,7 @@ def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core. 
# Load the partition for this `node_id` log(INFO, "Loading partition...") -partition = fds.load_partition(idx=args.node_id, split="train") +partition = fds.load_partition(node_id=args.node_id, split="train") partition.set_format("numpy") # Train/test splitting diff --git a/examples/xgboost-quickstart/pyproject.toml b/examples/xgboost-quickstart/pyproject.toml index 74256846c693..d82535311e58 100644 --- a/examples/xgboost-quickstart/pyproject.toml +++ b/examples/xgboost-quickstart/pyproject.toml @@ -10,6 +10,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" +flwr-nightly = ">=1.0,<2.0" flwr-datasets = ">=0.0.1,<1.0.0" xgboost = ">=2.0.0,<3.0.0" diff --git a/examples/xgboost-quickstart/requirements.txt b/examples/xgboost-quickstart/requirements.txt index 9596a8d6cd02..aefd74097582 100644 --- a/examples/xgboost-quickstart/requirements.txt +++ b/examples/xgboost-quickstart/requirements.txt @@ -1,3 +1,3 @@ -flwr>=1.0, <2.0 +flwr-nightly>=1.0, <2.0 flwr-datasets>=0.0.1, <1.0.0 xgboost>=2.0.0, <3.0.0 diff --git a/src/kotlin/flwr/src/main/java/dev/flower/android/Typing.kt b/src/kotlin/flwr/src/main/java/dev/flower/android/Typing.kt index a88af0e28974..6db7ecd36987 100644 --- a/src/kotlin/flwr/src/main/java/dev/flower/android/Typing.kt +++ b/src/kotlin/flwr/src/main/java/dev/flower/android/Typing.kt @@ -23,11 +23,11 @@ typealias Properties = Map * The `Code` class defines client status codes used in the application. 
/**
 * The `Code` class defines client status codes used in the application.
 *
 * NOTE(review): values are 0-indexed; this appears intended to match the
 * status codes used by other Flower implementations — confirm against the
 * Python `flwr.common.Code` enum.
 */
enum class Code(val value: Int) {
    OK(0),
    GET_PROPERTIES_NOT_IMPLEMENTED(1),
    GET_PARAMETERS_NOT_IMPLEMENTED(2),
    FIT_NOT_IMPLEMENTED(3),
    EVALUATE_NOT_IMPLEMENTED(4);

    companion object {
        // Resolve a raw integer back to its Code; `first` throws
        // NoSuchElementException when no constant carries that value.
        fun fromInt(value: Int): Code = values().first { it.value == value }
    }
}
run_client() -> None: args = _parse_args_client().parse_args() print(args.server) + print(args.callable_dir) + print(args.callable) + + callable_dir = args.callable_dir + if callable_dir is not None: + sys.path.insert(0, callable_dir) + + def _load() -> Flower: + flower: Flower = load_callable(args.callable) + return flower + + return start_client( + server_address=args.server, + load_callable_fn=_load, + transport="grpc-rere", # Only + ) def _parse_args_client() -> argparse.ArgumentParser: @@ -58,8 +77,18 @@ def _parse_args_client() -> argparse.ArgumentParser: parser.add_argument( "--server", - help="Server address", default="0.0.0.0:9092", + help="Server address", + ) + parser.add_argument( + "--callable", + help="For example: `client:flower` or `project.package.module:wrapper.flower`", + ) + parser.add_argument( + "--callable-dir", + default="", + help="Add specified directory to the PYTHONPATH and load callable from there." + " Default: current working directory.", ) return parser @@ -84,6 +113,7 @@ def _check_actionable_client( def start_client( *, server_address: str, + load_callable_fn: Optional[Callable[[], Flower]] = None, client_fn: Optional[ClientFn] = None, client: Optional[Client] = None, grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, @@ -98,6 +128,8 @@ def start_client( The IPv4 or IPv6 address of the server. If the Flower server runs on the same machine on port 8080, then `server_address` would be `"[::]:8080"`. + load_callable_fn : Optional[Callable[[], Flower]] (default: None) + ... client_fn : Optional[ClientFn] A callable that instantiates a Client. 
(default: None) client : Optional[flwr.client.Client] @@ -146,20 +178,31 @@ class `flwr.client.Client` (default: None) """ event(EventType.START_CLIENT_ENTER) - _check_actionable_client(client, client_fn) + if load_callable_fn is None: + _check_actionable_client(client, client_fn) - if client_fn is None: - # Wrap `Client` instance in `client_fn` - def single_client_factory( - cid: str, # pylint: disable=unused-argument - ) -> Client: - if client is None: # Added this to keep mypy happy - raise Exception( - "Both `client_fn` and `client` are `None`, but one is required" - ) - return client # Always return the same instance + if client_fn is None: + # Wrap `Client` instance in `client_fn` + def single_client_factory( + cid: str, # pylint: disable=unused-argument + ) -> Client: + if client is None: # Added this to keep mypy happy + raise Exception( + "Both `client_fn` and `client` are `None`, but one is required" + ) + return client # Always return the same instance + + client_fn = single_client_factory + + def _load_app() -> Flower: + return Flower(client_fn=client_fn) - client_fn = single_client_factory + load_callable_fn = _load_app + else: + warn_experimental_feature("`load_callable_fn`") + + # At this point, only `load_callable_fn` should be used + # Both `client` and `client_fn` must not be used directly # Initialize connection context manager connection, address = _init_connection(transport, server_address) @@ -190,11 +233,18 @@ def single_client_factory( send(task_res) break + # Load app + app: Flower = load_callable_fn() + # Handle task message - task_res = handle(client_fn, task_ins) + fwd_msg: Fwd = Fwd( + task_ins=task_ins, + state=WorkloadState(state={}), + ) + bwd_msg: Bwd = app(fwd=fwd_msg) # Send - send(task_res) + send(bwd_msg.task_res) # Unregister node if delete_node is not None: diff --git a/src/py/flwr/client/flower.py b/src/py/flwr/client/flower.py new file mode 100644 index 000000000000..9eeb41887e24 --- /dev/null +++ 
@dataclass
class Fwd:
    """Input to a Flower callable: an incoming TaskIns plus workload state."""

    # Task instruction received from the server for this workload step.
    task_ins: TaskIns
    # Mutable per-workload state carried across calls.
    state: WorkloadState


@dataclass
class Bwd:
    """Output of a Flower callable: the resulting TaskRes plus workload state."""

    # Task result to send back to the server.
    task_res: TaskRes
    # Workload state after handling the task.
    state: WorkloadState


# A Flower callable maps exactly one Fwd message to one Bwd message.
FlowerCallable = Callable[[Fwd], Bwd]
class LoadCallableError(Exception):
    """Raised when a `module:attribute` string cannot be resolved to a `Flower`."""


def load_callable(module_attribute_str: str) -> Flower:
    """Load the `Flower` object specified in a module attribute string.

    The module/attribute string must have the form `<module>:<attribute>`.
    Valid examples include `client:flower` and
    `project.package.module:wrapper.flower`. It must refer to a module on the
    PYTHONPATH, the module needs to have the specified attribute, and the
    attribute must be of type `Flower`.

    Raises
    ------
    LoadCallableError
        If the string is malformed, the module cannot be imported, the
        attribute is missing, or the attribute is not a `Flower` instance.
    """
    module_str, _, attributes_str = module_attribute_str.partition(":")
    if not module_str:
        raise LoadCallableError(
            f"Missing module in {module_attribute_str}",
        ) from None
    if not attributes_str:
        raise LoadCallableError(
            f"Missing attribute in {module_attribute_str}",
        ) from None

    # Load module
    try:
        module = importlib.import_module(module_str)
    except ModuleNotFoundError:
        raise LoadCallableError(
            f"Unable to load module {module_str}",
        ) from None

    # Recursively load attribute (supports dotted paths like `wrapper.flower`)
    attribute = module
    try:
        for attribute_str in attributes_str.split("."):
            attribute = getattr(attribute, attribute_str)
    except AttributeError:
        raise LoadCallableError(
            f"Unable to load attribute {attributes_str} from module {module_str}",
        ) from None

    # Check type
    if not isinstance(attribute, Flower):
        raise LoadCallableError(
            f"Attribute {attributes_str} is not of type {Flower}",
        ) from None

    return cast(Flower, attribute)
def warn_experimental_feature(name: str) -> None:
    """Warn the user when they use an experimental feature.

    Parameters
    ----------
    name : str
        Display name of the feature, interpolated into the warning template.
    """
    # Uses lazy %-style interpolation so the message is only formatted
    # when the WARN record is actually emitted.
    log(
        WARN,
        """
    EXPERIMENTAL FEATURE: %s

    This is an experimental feature. It could change significantly or be removed
    entirely in future versions of Flower.
    """,
        name,
    )
+ """ + super().__init__(*args, **kwargs) + def __repr__(self) -> str: """Compute a string representation of the strategy.""" rep = f"FedXgbNnAvg(accept_failures={self.accept_failures})"