diff --git a/baselines/doc/source/_templates/base.html b/baselines/doc/source/_templates/base.html
index 06762386f0d7..1cee99053fbe 100644
--- a/baselines/doc/source/_templates/base.html
+++ b/baselines/doc/source/_templates/base.html
@@ -5,6 +5,7 @@
     <meta charset="utf-8"/>
     <meta name="viewport" content="width=device-width,initial-scale=1"/>
     <meta name="color-scheme" content="light dark">
+    <link rel="canonical" href="https://flower.dev/docs/baselines/{{ pagename }}.html">
 
     {%- if metatags %}{{ metatags }}{% endif -%}
 
diff --git a/datasets/flwr_datasets/partitioner/iid_partitioner_test.py b/datasets/flwr_datasets/partitioner/iid_partitioner_test.py
index 6fa3be420168..d89eefeba9f2 100644
--- a/datasets/flwr_datasets/partitioner/iid_partitioner_test.py
+++ b/datasets/flwr_datasets/partitioner/iid_partitioner_test.py
@@ -56,8 +56,8 @@ def test_load_partition_size(self, num_partitions: int, num_rows: int) -> None:
         Only the correct data is tested in this method.
 
         In case the dataset is dividable among `num_partitions` the size of each
-        partition should be the same. This checks if the randomly chosen partition
-        has size as expected.
+        partition should be the same. This checks if the randomly chosen partition has
+        the expected size.
         """
         _, partitioner = _dummy_setup(num_partitions, num_rows)
         partition_size = num_rows // num_partitions
diff --git a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po
index 49ffa6e19fd2..eb1ae9a9d28b 100644
--- a/doc/locales/fr/LC_MESSAGES/framework-docs.po
+++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po
@@ -5050,7 +5050,7 @@ msgstr ""
 msgid ""
 "This can be achieved by customizing an existing strategy or by "
 "`implementing a custom strategy from scratch <https://flower.dev/docs"
-"/implementing-strategies.html>`_. Here's a nonsensical example that "
+"/how-to-implement-strategies.html>`_. Here's a nonsensical example that "
 "customizes :code:`FedAvg` by adding a custom ``\"hello\": \"world\"`` "
 "configuration key/value pair to the config dict of a *single client* "
 "(only the first client in the list, the other clients in this round to "
@@ -5058,7 +5058,7 @@ msgid ""
 msgstr ""
 "Ceci peut être réalisé en personnalisant une stratégie existante ou en "
 "`mettant en œuvre une stratégie personnalisée à partir de zéro "
-"<https://flower.dev/docs/implementing-strategies.html>`_. Voici un "
+"<https://flower.dev/docs/framework/how-to-implement-strategies.html>`_. Voici un "
 "exemple absurde qui personnalise :code:`FedAvg` en ajoutant une paire "
 "clé/valeur de configuration personnalisée ``\"hello\" : \"world\"`` au "
 "config dict d'un *seul client* (uniquement le premier client de la liste,"
@@ -7087,7 +7087,7 @@ msgid ""
 msgstr ""
 "L'écriture d'une stratégie entièrement personnalisée est un peu plus "
 "complexe, mais c'est celle qui offre le plus de souplesse. Lis le guide "
-"`Implémentation des stratégies <implementing-strategies.html>`_ pour en "
+"`Implémentation des stratégies <how-to-implement-strategies.html>`_ pour en "
 "savoir plus."
 
 #: ../../source/index.rst:31
@@ -10068,12 +10068,12 @@ msgstr ""
 #: ../../source/ref-changelog.md:517
 msgid ""
 "New documentation for [implementing strategies](https://flower.dev/docs"
-"/implementing-strategies.html) "
+"/how-to-implement-strategies.html) "
 "([#1097](https://github.com/adap/flower/pull/1097), "
 "[#1175](https://github.com/adap/flower/pull/1175))"
 msgstr ""
 "Nouvelle documentation pour [mettre en œuvre des "
-"stratégies](https://flower.dev/docs/implementing-strategies.html) "
+"stratégies](https://flower.dev/docs/framework/how-to-implement-strategies.html) "
 "([#1097](https://github.com/adap/flower/pull/1097), "
 "[#1175](https://github.com/adap/flower/pull/1175))"
 
diff --git a/doc/source/_static/tutorial/flower-any.jpeg b/doc/source/_static/tutorial/flower-any.jpeg
index f21df69bb927..ffbd48fd5876 100644
Binary files a/doc/source/_static/tutorial/flower-any.jpeg and b/doc/source/_static/tutorial/flower-any.jpeg differ
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 60c3e0b67c4b..28787a7aca8c 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -122,18 +122,18 @@
     # Renamed pages
     "installation": "how-to-install-flower.html",
     "configuring-clients.html": "how-to-configure-clients.html",
-    "quickstart_mxnet": "quickstart-mxnet.html",
-    "quickstart_pytorch_lightning": "quickstart-pytorch-lightning.html",
-    "quickstart_huggingface": "quickstart-huggingface.html",
-    "quickstart_pytorch": "quickstart-pytorch.html",
-    "quickstart_tensorflow": "quickstart-tensorflow.html",
-    "quickstart_scikitlearn": "quickstart-scikitlearn.html",
-    "quickstart_xgboost": "quickstart-xgboost.html",
+    "quickstart_mxnet": "tutorial-quickstart-mxnet.html",
+    "quickstart_pytorch_lightning": "tutorial-quickstart-pytorch-lightning.html",
+    "quickstart_huggingface": "tutorial-quickstart-huggingface.html",
+    "quickstart_pytorch": "tutorial-quickstart-pytorch.html",
+    "quickstart_tensorflow": "tutorial-quickstart-tensorflow.html",
+    "quickstart_scikitlearn": "tutorial-quickstart-scikitlearn.html",
+    "quickstart_xgboost": "tutorial-quickstart-xgboost.html",
     "example_walkthrough_pytorch_mnist": "example-walkthrough-pytorch-mnist.html",
-    "release_process": "release-process.html",
+    "release_process": "contributor-how-to-release-flower.html",
     "saving-progress": "how-to-save-and-load-model-checkpoints.html",
-    "writing-documentation": "write-documentation.html",
-    "apiref-binaries": "apiref-cli.html",
+    "writing-documentation": "contributor-how-to-write-documentation.html",
+    "apiref-binaries": "ref-api-cli.html",
     "fedbn-example-pytorch-from-centralized-to-federated": "example-fedbn-pytorch-from-centralized-to-federated.html",
     # Restructuring: tutorials
     "tutorial/Flower-0-What-is-FL": "tutorial-series-what-is-federated-learning.html",
diff --git a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst
index b2c55f06ac99..5ebaa337dde8 100644
--- a/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst
+++ b/doc/source/example-fedbn-pytorch-from-centralized-to-federated.rst
@@ -3,11 +3,11 @@ Example: FedBN in PyTorch - From Centralized To Federated
 
 This tutorial will show you how to use Flower to build a federated version of an existing machine learning workload with `FedBN <https://github.com/med-air/FedBN>`_, a federated training strategy designed for non-iid data.
 We are using PyTorch to train a Convolutional Neural Network(with Batch Normalization layers) on the CIFAR-10 dataset.
-When applying FedBN, only few changes needed compared to `Example: PyTorch - From Centralized To Federated <https://flower.dev/docs/example-pytorch-from-centralized-to-federated.html>`_.
+When applying FedBN, only a few changes are needed compared to `Example: PyTorch - From Centralized To Federated <https://flower.dev/docs/examples/pytorch-from-centralized-to-federated.html>`_.
 
 Centralized Training
 --------------------
-All files are revised based on `Example: PyTorch - From Centralized To Federated <https://flower.dev/docs/example-pytorch-from-centralized-to-federated.html>`_.
+All files are revised based on `Example: PyTorch - From Centralized To Federated <https://flower.dev/docs/examples/pytorch-from-centralized-to-federated.html>`_.
 The only thing to do is modifying the file called :code:`cifar.py`, revised part is shown below:
 
 The model architecture defined in class Net() is added with Batch Normalization layers accordingly.
@@ -50,8 +50,8 @@ Let's take the next step and use what we've built to create a federated learning
 Federated Training
 ------------------
 
-If you have read `Example: PyTorch - From Centralized To Federated <https://flower.dev/docs/example-pytorch-from-centralized-to-federated.html>`_, the following parts are easy to follow, onyl :code:`get_parameters` and :code:`set_parameters` function in :code:`client.py` needed to revise.
-If not, please read the `Example: PyTorch - From Centralized To Federated <https://flower.dev/docs/example-pytorch-from-centralized-to-federated.html>`_. first.
+If you have read `Example: PyTorch - From Centralized To Federated <https://flower.dev/docs/examples/pytorch-from-centralized-to-federated.html>`_, the following parts are easy to follow; only the :code:`get_parameters` and :code:`set_parameters` functions in :code:`client.py` need to be revised.
+If not, please read the `Example: PyTorch - From Centralized To Federated <https://flower.dev/docs/examples/pytorch-from-centralized-to-federated.html>`_ first.
 
 Our example consists of one *server* and two *clients*. In FedBN, :code:`server.py` keeps unchanged, we can start the server directly.
 
diff --git a/doc/source/how-to-configure-clients.rst b/doc/source/how-to-configure-clients.rst
index db2c4ba052e5..26c132125ccf 100644
--- a/doc/source/how-to-configure-clients.rst
+++ b/doc/source/how-to-configure-clients.rst
@@ -86,7 +86,7 @@ Configuring individual clients
 
 In some cases, it is necessary to send different configuration values to different clients.
 
-This can be achieved by customizing an existing strategy or by `implementing a custom strategy from scratch <https://flower.dev/docs/implementing-strategies.html>`_. Here's a nonsensical example that customizes :code:`FedAvg` by adding a custom ``"hello": "world"`` configuration key/value pair to the config dict of a *single client* (only the first client in the list, the other clients in this round to not receive this "special" config value):
+This can be achieved by customizing an existing strategy or by `implementing a custom strategy from scratch <https://flower.dev/docs/framework/how-to-implement-strategies.html>`_. Here's a nonsensical example that customizes :code:`FedAvg` by adding a custom ``"hello": "world"`` configuration key/value pair to the config dict of a *single client* (only the first client in the list, the other clients in this round do not receive this "special" config value):
 
 .. code-block:: python
 
diff --git a/doc/source/how-to-use-strategies.rst b/doc/source/how-to-use-strategies.rst
index c11da9633755..6d24f97bd7f6 100644
--- a/doc/source/how-to-use-strategies.rst
+++ b/doc/source/how-to-use-strategies.rst
@@ -86,4 +86,4 @@ Server-side evaluation can be enabled by passing an evaluation function to :code
 Implement a novel strategy
 --------------------------
 
-Writing a fully custom strategy is a bit more involved, but it provides the most flexibility. Read the `Implementing Strategies <implementing-strategies.html>`_ guide to learn more.
+Writing a fully custom strategy is a bit more involved, but it provides the most flexibility. Read the `Implementing Strategies <how-to-implement-strategies.html>`_ guide to learn more.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 5207debe6b29..48f8d59ea9b7 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,6 +1,9 @@
 Flower Framework Documentation
 ==============================
 
+.. meta::
+   :description: Check out the documentation of the main Flower Framework, which enables easy Python development for Federated Learning.
+
 Welcome to Flower's documentation. `Flower <https://flower.dev>`_ is a friendly federated learning framework.
 
 
diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md
index 9bb23c6bc025..a6838e88b4a4 100644
--- a/doc/source/ref-changelog.md
+++ b/doc/source/ref-changelog.md
@@ -2,6 +2,8 @@
 
 ## Unreleased
 
+- **Support custom** `ClientManager` **in** `start_driver()` ([#2292](https://github.com/adap/flower/pull/2292))
+
 - **Update REST API to support create and delete nodes** ([#2283](https://github.com/adap/flower/pull/2283))
 
 ### What's new?
@@ -536,7 +538,7 @@ We would like to give our **special thanks** to all the contributors who made Fl
 
   - New option to keep Ray running if Ray was already initialized in `start_simulation` ([#1177](https://github.com/adap/flower/pull/1177))
   - Add support for custom `ClientManager` as a `start_simulation` parameter ([#1171](https://github.com/adap/flower/pull/1171))
-  - New documentation for [implementing strategies](https://flower.dev/docs/implementing-strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), [#1175](https://github.com/adap/flower/pull/1175))
+  - New documentation for [implementing strategies](https://flower.dev/docs/framework/how-to-implement-strategies.html) ([#1097](https://github.com/adap/flower/pull/1097), [#1175](https://github.com/adap/flower/pull/1175))
   - New mobile-friendly documentation theme ([#1174](https://github.com/adap/flower/pull/1174))
   - Limit version range for (optional) `ray` dependency to include only compatible releases (`>=1.9.2,<1.12.0`) ([#1205](https://github.com/adap/flower/pull/1205))
 
diff --git a/doc/source/ref-example-projects.rst b/doc/source/ref-example-projects.rst
index 6a4761d7771d..b47bd8e48997 100644
--- a/doc/source/ref-example-projects.rst
+++ b/doc/source/ref-example-projects.rst
@@ -23,7 +23,7 @@ The TensorFlow/Keras quickstart example shows CIFAR-10 image classification
 with MobileNetV2:
 
 - `Quickstart TensorFlow (Code) <https://github.com/adap/flower/tree/main/examples/quickstart-tensorflow>`_
-- `Quickstart TensorFlow (Tutorial) <https://flower.dev/docs/quickstart-tensorflow.html>`_
+- `Quickstart TensorFlow (Tutorial) <https://flower.dev/docs/framework/tutorial-quickstart-tensorflow.html>`_
 - `Quickstart TensorFlow (Blog Post) <https://flower.dev/blog/2020-12-11-federated-learning-in-less-than-20-lines-of-code>`_
 
 
@@ -34,7 +34,7 @@ The PyTorch quickstart example shows CIFAR-10 image classification
 with a simple Convolutional Neural Network:
 
 - `Quickstart PyTorch (Code) <https://github.com/adap/flower/tree/main/examples/quickstart-pytorch>`_
-- `Quickstart PyTorch (Tutorial) <https://flower.dev/docs/quickstart-pytorch.html>`_
+- `Quickstart PyTorch (Tutorial) <https://flower.dev/docs/framework/tutorial-quickstart-pytorch.html>`_
 
 
 PyTorch: From Centralized To Federated
@@ -43,7 +43,7 @@ PyTorch: From Centralized To Federated
 This example shows how a regular PyTorch project can be federated using Flower:
 
 - `PyTorch: From Centralized To Federated (Code) <https://github.com/adap/flower/tree/main/examples/pytorch-from-centralized-to-federated>`_
-- `PyTorch: From Centralized To Federated (Tutorial) <https://flower.dev/docs/example-pytorch-from-centralized-to-federated.html>`_
+- `PyTorch: From Centralized To Federated (Tutorial) <https://flower.dev/docs/framework/example-pytorch-from-centralized-to-federated.html>`_
 
 
 Federated Learning on Raspberry Pi and Nvidia Jetson
diff --git a/doc/source/tutorial-quickstart-android.rst b/doc/source/tutorial-quickstart-android.rst
index cb66cd6a77d7..9177236d5a7c 100644
--- a/doc/source/tutorial-quickstart-android.rst
+++ b/doc/source/tutorial-quickstart-android.rst
@@ -4,6 +4,9 @@
 Quickstart Android
 ==================
 
+.. meta::
+   :description: Read this Federated Learning quickstart tutorial for creating an Android app using Flower.
+
 Let's build a federated learning system using TFLite and Flower on Android!
 
 Please refer to the `full code example <https://github.com/adap/flower/tree/main/examples/android>`_ to learn more.
diff --git a/doc/source/tutorial-quickstart-fastai.rst b/doc/source/tutorial-quickstart-fastai.rst
index 27e6d87fed2e..63f5ac176082 100644
--- a/doc/source/tutorial-quickstart-fastai.rst
+++ b/doc/source/tutorial-quickstart-fastai.rst
@@ -4,6 +4,9 @@
 Quickstart fastai
 =================
 
+.. meta::
+   :description: Check out this Federated Learning quickstart tutorial for using Flower with fastai to train a vision model on CIFAR-10.
+
 Let's build a federated learning system using fastai and Flower!
 
 Please refer to the `full code example <https://github.com/adap/flower/tree/main/examples/quickstart-fastai>`_ to learn more.
diff --git a/doc/source/tutorial-quickstart-huggingface.rst b/doc/source/tutorial-quickstart-huggingface.rst
index 19255d215b36..7718e6558456 100644
--- a/doc/source/tutorial-quickstart-huggingface.rst
+++ b/doc/source/tutorial-quickstart-huggingface.rst
@@ -4,6 +4,9 @@
 Quickstart 🤗 Transformers
 ==========================
 
+.. meta::
+   :description: Check out this Federated Learning quickstart tutorial for using Flower with HuggingFace Transformers in order to fine-tune an LLM.
+
 Let's build a federated learning system using Hugging Face Transformers and Flower!
 
 We will leverage Hugging Face to federate the training of language models over multiple clients using Flower. 
diff --git a/doc/source/tutorial-quickstart-ios.rst b/doc/source/tutorial-quickstart-ios.rst
index 09a335492b5c..7c8007baaa75 100644
--- a/doc/source/tutorial-quickstart-ios.rst
+++ b/doc/source/tutorial-quickstart-ios.rst
@@ -4,6 +4,9 @@
 Quickstart iOS
 ==============
 
+.. meta::
+   :description: Read this Federated Learning quickstart tutorial for creating an iOS app using Flower to train a neural network on MNIST.
+
 In this tutorial we will learn how to train a Neural Network on MNIST using Flower and CoreML on iOS devices. 
 
 First of all, for running the Flower Python server, it is recommended to create a virtual environment and run everything within a `virtualenv <https://flower.dev/docs/recommended-env-setup.html>`_.
diff --git a/doc/source/tutorial-quickstart-jax.rst b/doc/source/tutorial-quickstart-jax.rst
index 2b5485199c84..945f231e112e 100644
--- a/doc/source/tutorial-quickstart-jax.rst
+++ b/doc/source/tutorial-quickstart-jax.rst
@@ -4,6 +4,9 @@
 Quickstart JAX
 ==============
 
+.. meta::
+   :description: Check out this Federated Learning quickstart tutorial for using Flower with JAX to train a linear regression model on a scikit-learn dataset.
+
 This tutorial will show you how to use Flower to build a federated version of an existing JAX workload.
 We are using JAX to train a linear regression model on a scikit-learn dataset.
 We will structure the example similar to our `PyTorch - From Centralized To Federated <https://github.com/adap/flower/blob/main/examples/pytorch-from-centralized-to-federated>`_ walkthrough.
diff --git a/doc/source/tutorial-quickstart-mxnet.rst b/doc/source/tutorial-quickstart-mxnet.rst
index 5a12a75c71d2..149d060e4c00 100644
--- a/doc/source/tutorial-quickstart-mxnet.rst
+++ b/doc/source/tutorial-quickstart-mxnet.rst
@@ -4,6 +4,9 @@
 Quickstart MXNet
 ================
 
+.. meta::
+   :description: Check out this Federated Learning quickstart tutorial for using Flower with MXNet to train a Sequential model on MNIST.
+
 In this tutorial, we will learn how to train a :code:`Sequential` model on MNIST using Flower and MXNet. 
 
 It is recommended to create a virtual environment and run everything within this `virtualenv <https://flower.dev/docs/recommended-env-setup.html>`_. 
diff --git a/doc/source/tutorial-quickstart-pandas.rst b/doc/source/tutorial-quickstart-pandas.rst
index 63414a9f831c..bb9cb1b28b54 100644
--- a/doc/source/tutorial-quickstart-pandas.rst
+++ b/doc/source/tutorial-quickstart-pandas.rst
@@ -4,6 +4,9 @@
 Quickstart Pandas
 =================
 
+.. meta::
+   :description: Check out this Federated Learning quickstart tutorial for using Flower with Pandas to perform Federated Analytics.
+
 Let's build a federated analytics system using Pandas and Flower!
 
 Please refer to the `full code example <https://github.com/adap/flower/tree/main/examples/quickstart-pandas>`_ to learn more.
diff --git a/doc/source/tutorial-quickstart-pytorch-lightning.rst b/doc/source/tutorial-quickstart-pytorch-lightning.rst
index bff8b4581c11..b8d5e50c4714 100644
--- a/doc/source/tutorial-quickstart-pytorch-lightning.rst
+++ b/doc/source/tutorial-quickstart-pytorch-lightning.rst
@@ -4,6 +4,9 @@
 Quickstart PyTorch Lightning
 ============================
 
+.. meta::
+   :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch Lightning to train an autoencoder model on MNIST.
+
 Let's build a federated learning system using PyTorch Lightning and Flower!
 
 Please refer to the `full code example <https://github.com/adap/flower/tree/main/examples/quickstart-pytorch-lightning>`_ to learn more.
diff --git a/doc/source/tutorial-quickstart-pytorch.rst b/doc/source/tutorial-quickstart-pytorch.rst
index 238592b3ce18..fb77d107b63f 100644
--- a/doc/source/tutorial-quickstart-pytorch.rst
+++ b/doc/source/tutorial-quickstart-pytorch.rst
@@ -4,6 +4,9 @@
 Quickstart PyTorch
 ==================
 
+.. meta::
+   :description: Check out this Federated Learning quickstart tutorial for using Flower with PyTorch to train a CNN model on CIFAR-10.
+
 ..  youtube:: jOmmuzMIQ4c
    :width: 100%
 
diff --git a/doc/source/tutorial-quickstart-scikitlearn.rst b/doc/source/tutorial-quickstart-scikitlearn.rst
index 75814ebf2965..b33068e975fa 100644
--- a/doc/source/tutorial-quickstart-scikitlearn.rst
+++ b/doc/source/tutorial-quickstart-scikitlearn.rst
@@ -4,6 +4,9 @@
 Quickstart scikit-learn
 =======================
 
+.. meta::
+   :description: Check out this Federated Learning quickstart tutorial for using Flower with scikit-learn to train a logistic regression model on MNIST.
+
 In this tutorial, we will learn how to train a :code:`Logistic Regression` model on MNIST using Flower and scikit-learn. 
 
 It is recommended to create a virtual environment and run everything within this `virtualenv <https://flower.dev/docs/recommended-env-setup.html>`_. 
diff --git a/doc/source/tutorial-quickstart-tensorflow.rst b/doc/source/tutorial-quickstart-tensorflow.rst
index cb0fa1caa723..64b2255a9ac6 100644
--- a/doc/source/tutorial-quickstart-tensorflow.rst
+++ b/doc/source/tutorial-quickstart-tensorflow.rst
@@ -4,6 +4,9 @@
 Quickstart TensorFlow
 =====================
 
+.. meta::
+   :description: Check out this Federated Learning quickstart tutorial for using Flower with TensorFlow to train a MobileNetV2 model on CIFAR-10.
+
 ..  youtube:: FGTc2TQq7VM
    :width: 100%
 
diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst
index e8386dc3af7b..be7094614c63 100644
--- a/doc/source/tutorial-quickstart-xgboost.rst
+++ b/doc/source/tutorial-quickstart-xgboost.rst
@@ -4,6 +4,9 @@
 Quickstart XGBoost
 ==================
 
+.. meta::
+   :description: Check out this Federated Learning quickstart tutorial for using Flower with XGBoost to train tree-based classification models.
+
 Let's build a horizontal federated learning system using XGBoost and Flower!
 
 Please refer to the `full code example <https://github.com/adap/flower/tree/main/examples/quickstart-xgboost-horizontal>`_ to learn more.
diff --git a/examples/doc/source/_templates/base.html b/examples/doc/source/_templates/base.html
index 06762386f0d7..e4fe80720b74 100644
--- a/examples/doc/source/_templates/base.html
+++ b/examples/doc/source/_templates/base.html
@@ -5,6 +5,7 @@
     <meta charset="utf-8"/>
     <meta name="viewport" content="width=device-width,initial-scale=1"/>
     <meta name="color-scheme" content="light dark">
+    <link rel="canonical" href="https://flower.dev/docs/examples/{{ pagename }}.html">
 
     {%- if metatags %}{{ metatags }}{% endif -%}
 
diff --git a/examples/embedded-devices/_static/tmux_jtop_view.gif b/examples/embedded-devices/_static/tmux_jtop_view.gif
index cde8fe4388b5..7e92b586851a 100644
Binary files a/examples/embedded-devices/_static/tmux_jtop_view.gif and b/examples/embedded-devices/_static/tmux_jtop_view.gif differ
diff --git a/examples/embedded-devices/media/diagram.png b/examples/embedded-devices/media/diagram.png
deleted file mode 100644
index 66d8855c859f..000000000000
Binary files a/examples/embedded-devices/media/diagram.png and /dev/null differ
diff --git a/examples/embedded-devices/media/tmux_jtop_view.gif b/examples/embedded-devices/media/tmux_jtop_view.gif
deleted file mode 100644
index cde8fe4388b5..000000000000
Binary files a/examples/embedded-devices/media/tmux_jtop_view.gif and /dev/null differ
diff --git a/examples/quickstart-fastai/README.md b/examples/quickstart-fastai/README.md
index 3dd512bf6451..38ef23c95a1e 100644
--- a/examples/quickstart-fastai/README.md
+++ b/examples/quickstart-fastai/README.md
@@ -71,4 +71,4 @@ Start client 2 in the second terminal:
 python3 client.py
 ```
 
-You will see that fastai is starting a federated training. Have a look to the [Flower Quickstarter documentation](https://flower.dev/docs/quickstart-fastai.html) for a detailed explanation.
+You will see that fastai is starting a federated training. For a more in-depth look, be sure to check out the code on our [repo](https://github.com/adap/flower/tree/main/examples/quickstart-fastai).
diff --git a/examples/quickstart-huggingface/README.md b/examples/quickstart-huggingface/README.md
index f04d14f4f96a..c1e3cc4edc06 100644
--- a/examples/quickstart-huggingface/README.md
+++ b/examples/quickstart-huggingface/README.md
@@ -1,6 +1,6 @@
 # Federated HuggingFace Transformers using Flower and PyTorch
 
-This introductory example to using [HuggingFace](https://huggingface.co) Transformers with Flower with PyTorch. This example has been extended from the [quickstart-pytorch](https://flower.dev/docs/quickstart-pytorch.html) example. The training script closely follows the [HuggingFace course](https://huggingface.co/course/chapter3?fw=pt), so you are encouraged to check that out for detailed explaination for the transformer pipeline.
+This is an introductory example of using [HuggingFace](https://huggingface.co) Transformers with Flower and PyTorch. This example has been extended from the [quickstart-pytorch](https://flower.dev/docs/examples/quickstart-pytorch.html) example. The training script closely follows the [HuggingFace course](https://huggingface.co/course/chapter3?fw=pt), so you are encouraged to check that out for a detailed explanation of the transformer pipeline.
 
 Like `quickstart-pytorch`, running this example in itself is also meant to be quite easy.
 
diff --git a/examples/quickstart-pytorch/README.md b/examples/quickstart-pytorch/README.md
index 857d64a46f52..f748894f4971 100644
--- a/examples/quickstart-pytorch/README.md
+++ b/examples/quickstart-pytorch/README.md
@@ -70,4 +70,4 @@ Start client 2 in the second terminal:
 python3 client.py
 ```
 
-You will see that PyTorch is starting a federated training. Have a look to the [Flower Quickstarter documentation](https://flower.dev/docs/quickstart-pytorch.html) for a detailed explanation.
+You will see that PyTorch is starting a federated training. Look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-pytorch) for a detailed explanation.
diff --git a/examples/quickstart-tensorflow/README.md b/examples/quickstart-tensorflow/README.md
index 9c1e055f33b7..7ada48797d03 100644
--- a/examples/quickstart-tensorflow/README.md
+++ b/examples/quickstart-tensorflow/README.md
@@ -70,4 +70,4 @@ poetry run python3 client.py &
 poetry run python3 client.py
 ```
 
-You will see that Keras is starting a federated training. Have a look to the [Flower Quickstarter documentation](https://flower.dev/docs/quickstart-tensorflow.html) for a detailed explanation. You can add `steps_per_epoch=3` to `model.fit()` if you just want to evaluate that everything works without having to wait for the client-side training to finish (this will save you a lot of time during development).
+You will see that Keras is starting a federated training. Have a look at the [code](https://github.com/adap/flower/tree/main/examples/quickstart-tensorflow) for a detailed explanation. You can add `steps_per_epoch=3` to `model.fit()` if you just want to evaluate that everything works without having to wait for the client-side training to finish (this will save you a lot of time during development).
diff --git a/src/py/flwr/driver/app.py b/src/py/flwr/driver/app.py
index dc0a7d92a34a..f45104aa359d 100644
--- a/src/py/flwr/driver/app.py
+++ b/src/py/flwr/driver/app.py
@@ -16,19 +16,23 @@
 
 
 import sys
+import threading
+import time
 from logging import INFO
-from typing import Optional
+from typing import Dict, Optional
 
 from flwr.common import EventType, event
 from flwr.common.address import parse_address
 from flwr.common.logger import log
+from flwr.proto import driver_pb2
 from flwr.server.app import ServerConfig, init_defaults, run_fl
+from flwr.server.client_manager import ClientManager
 from flwr.server.history import History
 from flwr.server.server import Server
 from flwr.server.strategy import Strategy
 
 from .driver import Driver
-from .driver_client_manager import DriverClientManager
+from .driver_client_proxy import DriverClientProxy
 
 DEFAULT_SERVER_ADDRESS_DRIVER = "[::]:9091"
 
@@ -40,13 +44,13 @@
 """
 
 
-def start_driver(  # pylint: disable=too-many-arguments
+def start_driver(  # pylint: disable=too-many-arguments, too-many-locals
     *,
     server_address: str = DEFAULT_SERVER_ADDRESS_DRIVER,
     server: Optional[Server] = None,
     config: Optional[ServerConfig] = None,
     strategy: Optional[Strategy] = None,
-    client_manager: Optional[DriverClientManager] = None,
+    client_manager: Optional[ClientManager] = None,
     certificates: Optional[bytes] = None,
 ) -> History:
     """Start a Flower Driver API server.
@@ -107,12 +111,10 @@ def start_driver(  # pylint: disable=too-many-arguments
     host, port, is_v6 = parsed_address
     address = f"[{host}]:{port}" if is_v6 else f"{host}:{port}"
 
-    # Create the Driver and DriverClientManager if None is provided
-    if client_manager is None:
-        driver = Driver(driver_service_address=address, certificates=certificates)
-        driver.connect()
-
-        client_manager = DriverClientManager(driver=driver)
+    # Create the Driver
+    driver = Driver(driver_service_address=address, certificates=certificates)
+    driver.connect()
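+    # This lock guards access to `driver`, which is shared with the
+    # node-update thread started below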
+    lock = threading.Lock()
 
     # Initialize the Driver API server and config
     initialized_server, initialized_config = init_defaults(
@@ -127,15 +129,83 @@ def start_driver(  # pylint: disable=too-many-arguments
         initialized_config,
     )
 
+    # Start the thread updating nodes
+    thread = threading.Thread(
+        target=update_client_manager,
+        args=(
+            driver,
+            initialized_server.client_manager(),
+            lock,
+        ),
+    )
+    thread.start()
+
     # Start training
     hist = run_fl(
         server=initialized_server,
         config=initialized_config,
     )
 
-    # Stop the Driver API server
-    client_manager.driver.disconnect()
+    # Stop the Driver API server and the thread
+    with lock:
+        driver.disconnect()
+    thread.join()
 
     event(EventType.START_SERVER_LEAVE)
 
     return hist
+
+
+def update_client_manager(
+    driver: Driver,
+    client_manager: ClientManager,
+    lock: threading.Lock,
+) -> None:
+    """Update the nodes list in the client manager.
+
+    This function periodically communicates with the associated driver to get all
+    node_ids. Each node_id is then converted into a `DriverClientProxy` instance
+    and stored in the `registered_nodes` dictionary with node_id as key.
+
+    New nodes will be added to the ClientManager via `client_manager.register()`,
+    and dead nodes will be removed from the ClientManager via
+    `client_manager.unregister()`.
+    """
+    # Request a workload_id from the driver
+    workload_id = driver.create_workload(driver_pb2.CreateWorkloadRequest()).workload_id
+
+    # Loop until the driver is disconnected
+    registered_nodes: Dict[int, DriverClientProxy] = {}
+    while True:
+        with lock:
+            # End the while loop if the driver is disconnected
+            if driver.stub is None:
+                break
+            get_nodes_res = driver.get_nodes(
+                req=driver_pb2.GetNodesRequest(workload_id=workload_id)
+            )
+        all_node_ids = {node.node_id for node in get_nodes_res.nodes}
+        dead_nodes = set(registered_nodes).difference(all_node_ids)
+        new_nodes = all_node_ids.difference(registered_nodes)
+
+        # Unregister dead nodes
+        for node_id in dead_nodes:
+            client_proxy = registered_nodes[node_id]
+            client_manager.unregister(client_proxy)
+            del registered_nodes[node_id]
+
+        # Register new nodes
+        for node_id in new_nodes:
+            client_proxy = DriverClientProxy(
+                node_id=node_id,
+                driver=driver,
+                anonymous=False,
+                workload_id=workload_id,
+            )
+            if client_manager.register(client_proxy):
+                registered_nodes[node_id] = client_proxy
+            else:
+                raise RuntimeError("Could not register node.")
+
+        # Sleep for 3 seconds
+        time.sleep(3)
diff --git a/src/py/flwr/driver/app_test.py b/src/py/flwr/driver/app_test.py
new file mode 100644
index 000000000000..792bd84b6106
--- /dev/null
+++ b/src/py/flwr/driver/app_test.py
@@ -0,0 +1,84 @@
+# Copyright 2022 Adap GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Flower Driver app tests."""
+# pylint: disable=no-self-use
+
+
+import threading
+import time
+import unittest
+from unittest.mock import MagicMock
+
+from flwr.driver.app import update_client_manager
+from flwr.proto.driver_pb2 import CreateWorkloadResponse, GetNodesResponse
+from flwr.proto.node_pb2 import Node
+from flwr.server.client_manager import SimpleClientManager
+
+
+class TestClientManagerWithDriver(unittest.TestCase):
+    """Tests for ClientManager.
+
+    Considering multi-threading, all tests assume that `update_client_manager()`
+    updates the ClientManager every 3 seconds.
+    """
+
+    def test_simple_client_manager_update(self) -> None:
+        """Tests if the node update works correctly."""
+        # Prepare
+        expected_nodes = [Node(node_id=i, anonymous=False) for i in range(100)]
+        expected_updated_nodes = [
+            Node(node_id=i, anonymous=False) for i in range(80, 120)
+        ]
+        driver = MagicMock()
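+        # A non-None stub keeps `update_client_manager` polling; setting it to
+        # None later simulates `driver.disconnect()`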
+        driver.stub = "driver stub"
+        driver.create_workload.return_value = CreateWorkloadResponse(workload_id="1")
+        driver.get_nodes.return_value = GetNodesResponse(nodes=expected_nodes)
+        client_manager = SimpleClientManager()
+        lock = threading.Lock()
+
+        # Execute
+        thread = threading.Thread(
+            target=update_client_manager,
+            args=(
+                driver,
+                client_manager,
+                lock,
+            ),
+            daemon=True,
+        )
+        thread.start()
+        # Wait until all nodes are registered via `client_manager.sample()`
+        client_manager.sample(len(expected_nodes))
+        # Retrieve all nodes in `client_manager`
+        node_ids = {proxy.node_id for proxy in client_manager.all().values()}
+        # Update the GetNodesResponse and wait until the `client_manager` is updated
+        driver.get_nodes.return_value = GetNodesResponse(nodes=expected_updated_nodes)
+        while True:
+            with lock:
+                if len(client_manager.all()) == len(expected_updated_nodes):
+                    break
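+            # Check every 1.3 seconds, i.e. more often than the 3-second
+            # update interval of `update_client_manager`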
+            time.sleep(1.3)
+        # Retrieve all nodes in `client_manager`
+        updated_node_ids = {proxy.node_id for proxy in client_manager.all().values()}
+        # Simulate `driver.disconnect()`
+        driver.stub = None
+
+        # Assert
+        driver.create_workload.assert_called_once()
+        assert node_ids == {node.node_id for node in expected_nodes}
+        assert updated_node_ids == {node.node_id for node in expected_updated_nodes}
+
+        # Exit
+        thread.join()
diff --git a/src/py/flwr/driver/driver_client_manager.py b/src/py/flwr/driver/driver_client_manager.py
deleted file mode 100644
index ed886a27c9f4..000000000000
--- a/src/py/flwr/driver/driver_client_manager.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# Copyright 2020 Adap GmbH. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-"""Flower DriverClientManager."""
-
-
-import random
-import time
-from logging import INFO
-from typing import Dict, List, Optional
-
-from flwr.common.logger import log
-from flwr.proto import driver_pb2
-from flwr.server.client_manager import ClientManager
-from flwr.server.client_proxy import ClientProxy
-from flwr.server.criterion import Criterion
-
-from .driver import Driver
-from .driver_client_proxy import DriverClientProxy
-
-
-class DriverClientManager(ClientManager):
-    """Provides a pool of available clients."""
-
-    def __init__(self, driver: Driver) -> None:
-        self.driver = driver
-        self.workload_id = ""
-        self.clients: Dict[str, ClientProxy] = {}
-
-    def __len__(self) -> int:
-        """Return the number of available clients.
-
-        Returns
-        -------
-        num_available : int
-            The number of currently available clients.
-        """
-        self._update_nodes()
-        return len(self.clients)
-
-    def num_available(self) -> int:
-        """Return the number of available clients.
-
-        Returns
-        -------
-        num_available : int
-            The number of currently available clients.
-        """
-        return len(self)
-
-    def register(self, client: ClientProxy) -> bool:
-        """Register Flower ClientProxy instance.
-
-        Parameters
-        ----------
-        client : flwr.server.client_proxy.ClientProxy
-
-        Returns
-        -------
-        success : bool
-            Indicating if registration was successful. False if ClientProxy is
-            already registered or can not be registered for any reason.
-        """
-        raise NotImplementedError("DriverClientManager.register is not implemented")
-
-    def unregister(self, client: ClientProxy) -> None:
-        """Unregister Flower ClientProxy instance.
-
-        This method is idempotent.
-
-        Parameters
-        ----------
-        client : flwr.server.client_proxy.ClientProxy
-        """
-        raise NotImplementedError("DriverClientManager.unregister is not implemented")
-
-    def all(self) -> Dict[str, ClientProxy]:
-        """Return all available clients."""
-        self._update_nodes()
-        return self.clients
-
-    def wait_for(self, num_clients: int, timeout: int = 86400) -> bool:
-        """Wait until at least `num_clients` are available."""
-        start_time = time.time()
-        while time.time() < start_time + timeout:
-            self._update_nodes()
-            if len(self.clients) >= num_clients:
-                return True
-            time.sleep(1)
-        return False
-
-    def sample(
-        self,
-        num_clients: int,
-        min_num_clients: Optional[int] = None,
-        criterion: Optional[Criterion] = None,
-    ) -> List[ClientProxy]:
-        """Sample a number of Flower ClientProxy instances."""
-        if min_num_clients is None:
-            min_num_clients = num_clients
-        self.wait_for(min_num_clients)
-
-        available_cids = list(self.clients)
-
-        if criterion is not None:
-            available_cids = [
-                cid for cid in available_cids if criterion.select(self.clients[cid])
-            ]
-
-        if num_clients > len(available_cids):
-            log(
-                INFO,
-                "Sampling failed: number of available clients"
-                " (%s) is less than number of requested clients (%s).",
-                len(available_cids),
-                num_clients,
-            )
-            return []
-
-        sampled_cids = random.sample(available_cids, num_clients)
-        return [self.clients[cid] for cid in sampled_cids]
-
-    def _update_nodes(self) -> None:
-        """Update the nodes list in the client manager.
-
-        This method communicates with the associated driver to get all node ids. Each
-        node id is then converted into a `DriverClientProxy` instance and stored in the
-        `clients` dictionary with node id as key.
-        """
-        if self.workload_id == "":
-            self.workload_id = self.driver.create_workload(
-                driver_pb2.CreateWorkloadRequest()
-            ).workload_id
-        get_nodes_res = self.driver.get_nodes(
-            req=driver_pb2.GetNodesRequest(workload_id=self.workload_id)
-        )
-        all_nodes = get_nodes_res.nodes
-        for node in all_nodes:
-            self.clients[str(node.node_id)] = DriverClientProxy(
-                node_id=node.node_id,
-                driver=self.driver,
-                anonymous=False,
-                workload_id=self.workload_id,
-            )