From 5cf5c827abd5ba70cab0c9dce306733780920da6 Mon Sep 17 00:00:00 2001 From: Yan Gao Date: Fri, 17 Nov 2023 12:48:21 +0000 Subject: [PATCH 01/11] Create XGBoost-quickstart example (#2612) Co-authored-by: yan-gao-GY --- examples/xgboost-quickstart/README.md | 86 +++++++++ examples/xgboost-quickstart/client.py | 173 +++++++++++++++++++ examples/xgboost-quickstart/pyproject.toml | 15 ++ examples/xgboost-quickstart/requirements.txt | 3 + examples/xgboost-quickstart/run.sh | 17 ++ examples/xgboost-quickstart/server.py | 37 ++++ 6 files changed, 331 insertions(+) create mode 100644 examples/xgboost-quickstart/README.md create mode 100644 examples/xgboost-quickstart/client.py create mode 100644 examples/xgboost-quickstart/pyproject.toml create mode 100644 examples/xgboost-quickstart/requirements.txt create mode 100755 examples/xgboost-quickstart/run.sh create mode 100644 examples/xgboost-quickstart/server.py diff --git a/examples/xgboost-quickstart/README.md b/examples/xgboost-quickstart/README.md new file mode 100644 index 000000000000..53cd37e18aa3 --- /dev/null +++ b/examples/xgboost-quickstart/README.md @@ -0,0 +1,86 @@ +# Flower Example using XGBoost + +This example demonstrates how to perform EXtreme Gradient Boosting (XGBoost) within Flower using `xgboost` package. +A tree-based bagging method is used for aggregation on the server. + +This project provides a minimal code example to enable you to get started quickly. For a more comprehensive code example, take a look at [xgboost-comprehensive](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive). + +## Project Setup + +Start by cloning the example project. We prepared a single-line command that you can copy into your shell which will checkout the example for you: + +```shell +git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/xgboost-quickstart . 
&& rm -rf flower && cd xgboost-quickstart +``` + +This will create a new directory called `xgboost-quickstart` containing the following files: + +``` +-- README.md <- You're reading this right now +-- server.py <- Defines the server-side logic +-- client.py <- Defines the client-side logic +-- pyproject.toml <- Example dependencies (if you use Poetry) +-- requirements.txt <- Example dependencies +``` + +### Installing Dependencies + +Project dependencies (such as `xgboost` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences. + +#### Poetry + +```shell +poetry install +poetry shell +``` + +Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly you can run the following command: + +```shell +poetry run python3 -c "import flwr" +``` + +If you don't see any errors you're good to go! + +#### pip + +Write the command below in your terminal to install the dependencies according to the configuration file requirements.txt. + +```shell +pip install -r requirements.txt +``` + +## Run Federated Learning with XGBoost and Flower + +Afterwards you are ready to start the Flower server as well as the clients. +You can simply start the server in a terminal as follows: + +```shell +python3 server.py +``` + +Now you are ready to start the Flower clients which will participate in the learning. +To do so simply open two more terminal windows and run the following commands. 
+ +Start client 1 in the first terminal: + +```shell +python3 client.py --node-id=0 +``` + +Start client 2 in the second terminal: + +```shell +python3 client.py --node-id=1 +``` + +You will see that XGBoost is starting a federated training. + +Alternatively, you can use `run.sh` to run the same experiment in a single terminal as follows: + +```shell +bash run.sh +``` + +Look at the [code](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) +and [tutorial](https://flower.dev/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. diff --git a/examples/xgboost-quickstart/client.py b/examples/xgboost-quickstart/client.py new file mode 100644 index 000000000000..ede4a2bba764 --- /dev/null +++ b/examples/xgboost-quickstart/client.py @@ -0,0 +1,173 @@ +import argparse +import warnings +from typing import Union +from logging import INFO +from datasets import Dataset, DatasetDict +import xgboost as xgb + +import flwr as fl +from flwr_datasets import FederatedDataset +from flwr.common.logger import log +from flwr.common import ( + Code, + EvaluateIns, + EvaluateRes, + FitIns, + FitRes, + GetParametersIns, + GetParametersRes, + Parameters, + Status, +) +from flwr_datasets.partitioner import IidPartitioner + + +warnings.filterwarnings("ignore", category=UserWarning) + +# Define arguments parser for the client/node ID. 
+parser = argparse.ArgumentParser() +parser.add_argument( + "--node-id", + default=0, + type=int, + help="Node ID used for the current client.", +) +args = parser.parse_args() + + +# Define data partitioning related functions +def train_test_split(partition: Dataset, test_fraction: float, seed: int): + """Split the data into train and validation set given split rate.""" + train_test = partition.train_test_split(test_size=test_fraction, seed=seed) + partition_train = train_test["train"] + partition_test = train_test["test"] + + num_train = len(partition_train) + num_test = len(partition_test) + + return partition_train, partition_test, num_train, num_test + + +def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core.DMatrix: + """Transform dataset to DMatrix format for xgboost.""" + x = data["inputs"] + y = data["label"] + new_data = xgb.DMatrix(x, label=y) + return new_data + + +# Load (HIGGS) dataset and conduct partitioning +partitioner = IidPartitioner(num_partitions=2) +fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) + +# Load the partition for this `node_id` +partition = fds.load_partition(idx=args.node_id, split="train") +partition.set_format("numpy") + +# Train/test splitting +train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=0.2, seed=42 +) + +# Reformat data to DMatrix for xgboost +train_dmatrix = transform_dataset_to_dmatrix(train_data) +valid_dmatrix = transform_dataset_to_dmatrix(valid_data) + +# Hyper-parameters for xgboost training +num_local_round = 1 +params = { + "objective": "binary:logistic", + "eta": 0.1, # Learning rate + "max_depth": 8, + "eval_metric": "auc", + "nthread": 16, + "num_parallel_tree": 1, + "subsample": 1, + "tree_method": "hist", +} + + +# Define Flower client +class FlowerClient(fl.client.Client): + def __init__(self): + self.bst = None + self.config = None + + def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: + _ 
= (self, ins) + return GetParametersRes( + status=Status( + code=Code.OK, + message="OK", + ), + parameters=Parameters(tensor_type="", tensors=[]), + ) + + def _local_boost(self): + # Update trees based on local training data. + for i in range(num_local_round): + self.bst.update(train_dmatrix, self.bst.num_boosted_rounds()) + + # Extract the last N=num_local_round trees for server aggregation + bst = self.bst[ + self.bst.num_boosted_rounds() + - num_local_round : self.bst.num_boosted_rounds() + ] + + return bst + + def fit(self, ins: FitIns) -> FitRes: + if not self.bst: + # First round local training + log(INFO, "Start training at round 1") + bst = xgb.train( + params, + train_dmatrix, + num_boost_round=num_local_round, + evals=[(valid_dmatrix, "validate"), (train_dmatrix, "train")], + ) + self.config = bst.save_config() + self.bst = bst + else: + for item in ins.parameters.tensors: + global_model = bytearray(item) + + # Load global model into booster + self.bst.load_model(global_model) + self.bst.load_config(self.config) + + bst = self._local_boost() + + local_model = bst.save_raw("json") + local_model_bytes = bytes(local_model) + + return FitRes( + status=Status( + code=Code.OK, + message="OK", + ), + parameters=Parameters(tensor_type="", tensors=[local_model_bytes]), + num_examples=num_train, + metrics={}, + ) + + def evaluate(self, ins: EvaluateIns) -> EvaluateRes: + eval_results = self.bst.eval_set( + evals=[(valid_dmatrix, "valid")], + iteration=self.bst.num_boosted_rounds() - 1, + ) + auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + + return EvaluateRes( + status=Status( + code=Code.OK, + message="OK", + ), + loss=0.0, + num_examples=num_val, + metrics={"AUC": auc}, + ) + + +# Start Flower client +fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient()) diff --git a/examples/xgboost-quickstart/pyproject.toml b/examples/xgboost-quickstart/pyproject.toml new file mode 100644 index 000000000000..74256846c693 --- /dev/null 
+++ b/examples/xgboost-quickstart/pyproject.toml @@ -0,0 +1,15 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "xgboost-quickstart" +version = "0.1.0" +description = "Federated XGBoost with Flower (quickstart)" +authors = ["The Flower Authors "] + +[tool.poetry.dependencies] +python = ">=3.8,<3.11" +flwr = ">=1.0,<2.0" +flwr-datasets = ">=0.0.1,<1.0.0" +xgboost = ">=2.0.0,<3.0.0" diff --git a/examples/xgboost-quickstart/requirements.txt b/examples/xgboost-quickstart/requirements.txt new file mode 100644 index 000000000000..9596a8d6cd02 --- /dev/null +++ b/examples/xgboost-quickstart/requirements.txt @@ -0,0 +1,3 @@ +flwr>=1.0, <2.0 +flwr-datasets>=0.0.1, <1.0.0 +xgboost>=2.0.0, <3.0.0 diff --git a/examples/xgboost-quickstart/run.sh b/examples/xgboost-quickstart/run.sh new file mode 100755 index 000000000000..6287145bfb5f --- /dev/null +++ b/examples/xgboost-quickstart/run.sh @@ -0,0 +1,17 @@ +#!/bin/bash +set -e +cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ + +echo "Starting server" +python server.py & +sleep 5 # Sleep for 5s to give the server enough time to start + +for i in `seq 0 1`; do + echo "Starting client $i" + python3 client.py --node-id=$i & +done + +# Enable CTRL+C to stop all background processes +trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM +# Wait for all background processes to complete +wait diff --git a/examples/xgboost-quickstart/server.py b/examples/xgboost-quickstart/server.py new file mode 100644 index 000000000000..b45a375ce94f --- /dev/null +++ b/examples/xgboost-quickstart/server.py @@ -0,0 +1,37 @@ +import flwr as fl +from flwr.server.strategy import FedXgbBagging + + +# FL experimental settings +pool_size = 2 +num_rounds = 5 +num_clients_per_round = 2 +num_evaluate_clients = 2 + + +def evaluate_metrics_aggregation(eval_metrics): + """Return an aggregated metric (AUC) for evaluation.""" + total_num = sum([num for num, _ in 
eval_metrics]) + auc_aggregated = ( + sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num + ) + metrics_aggregated = {"AUC": auc_aggregated} + return metrics_aggregated + + +# Define strategy +strategy = FedXgbBagging( + fraction_fit=(float(num_clients_per_round) / pool_size), + min_fit_clients=num_clients_per_round, + min_available_clients=pool_size, + min_evaluate_clients=num_evaluate_clients, + fraction_evaluate=1.0, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, +) + +# Start Flower server +fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=num_rounds), + strategy=strategy, +) From e8c94659e5b855999aaa1ca715b181aed291621b Mon Sep 17 00:00:00 2001 From: Yan Gao Date: Sun, 19 Nov 2023 19:31:10 +0000 Subject: [PATCH 02/11] Update xgboost-comprehensive example and some small fixes (#2617) Co-authored-by: yan-gao-GY --- examples/xgboost-comprehensive/README.md | 62 ++++++++++-------- .../_static/xgboost_flower_auc.png | Bin 0 -> 68480 bytes examples/xgboost-comprehensive/client.py | 19 ++---- examples/xgboost-comprehensive/run.sh | 6 +- examples/xgboost-comprehensive/server.py | 13 +--- examples/xgboost-comprehensive/utils.py | 12 ++++ 6 files changed, 56 insertions(+), 56 deletions(-) create mode 100644 examples/xgboost-comprehensive/_static/xgboost_flower_auc.png diff --git a/examples/xgboost-comprehensive/README.md b/examples/xgboost-comprehensive/README.md index 3b31e23cb321..783438188dab 100644 --- a/examples/xgboost-comprehensive/README.md +++ b/examples/xgboost-comprehensive/README.md @@ -1,7 +1,13 @@ -# Flower Example using XGBoost +# Flower Example using XGBoost (Comprehensive) -This example demonstrates how to perform EXtreme Gradient Boosting (XGBoost) within Flower using `xgboost` package. -Tree-based with bagging method is used for aggregation on the server. +This example demonstrates a comprehensive federated learning setup using Flower with XGBoost. 
+It differs from the quickstart example in the following ways: + +- Arguments parsers of server and clients for hyperparameters selection. +- Customised FL settings. +- Customised number of partitions. +- Customised partitioner type (uniform, linear, square, exponential). +- Centralised/distributed evaluation. ## Project Setup @@ -18,7 +24,8 @@ This will create a new directory called `xgboost-comprehensive` containing the f -- server.py <- Defines the server-side logic -- client.py <- Defines the client-side logic -- dataset.py <- Defines the functions of data loading and partitioning --- utils.py <- Defines the arguments parser for clients and server. +-- utils.py <- Defines the arguments parser for clients and server +-- run.sh <- Commands to run experiments -- pyproject.toml <- Example dependencies (if you use Poetry) -- requirements.txt <- Example dependencies ``` @@ -52,36 +59,35 @@ pip install -r requirements.txt ## Run Federated Learning with XGBoost and Flower -Afterwards you are ready to start the Flower server as well as the clients. -You can simply start the server in a terminal as follows: +The included `run.sh` will start the Flower server (using `server.py`) with centralised evaluation, +sleep for 15 seconds to ensure that the server is up, +and then start 5 Flower clients (using `client.py`) with a small subset of the data from exponential partition distribution. +You can simply start everything in a terminal as follows: ```shell -python3 server.py +poetry run ./run.sh ``` -Now you are ready to start the Flower clients which will participate in the learning. -To do so simply open two more terminal windows and run the following commands. - -Start client 1 in the first terminal: +The `run.sh` script starts processes in the background so that you don't have to open eleven terminal windows. 
+If you experiment with the code example and something goes wrong, simply using `CTRL + C` on Linux (or `CMD + C` on macOS) wouldn't normally kill all these processes, +which is why the script ends with `trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT` and `wait`. +This simply allows you to stop the experiment using `CTRL + C` (or `CMD + C`). +If you change the script and anything goes wrong you can still use `killall python` (or `killall python3`) +to kill all background processes (or a more specific command if you have other Python processes running that you don't want to kill). -```shell -python3 client.py --node-id=0 -``` - -Start client 2 in the second terminal: - -```shell -python3 client.py --node-id=1 -``` +You can also manually run `poetry run python3 server.py --pool-size=N --num-clients-per-round=N` +and `poetry run python3 client.py --node-id=NODE_ID --num-partitions=N` for as many clients as you want, +but you have to make sure that each command is run in a different terminal window (or a different computer on the network). -You will see that XGBoost is starting a federated training. +In addition, we provide more options to customise the experimental settings, including data partitioning and centralised/distributed evaluation (see `utils.py`). +Look at the [code](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive) +and [tutorial](https://flower.dev/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. -Alternatively, you can use `run.sh` to run the same experiment in a single terminal as follows: +### Expected Experimental Results -```shell -bash run.sh -``` +![](_static/xgboost_flower_auc.png) -Besides, we provide options to customise the experimental settings, including data partitioning and centralised/distributed evaluation (see `utils.py`). 
-Look at the [code](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive) -and [tutorial](https://flower.dev/docs/framework/tutorial-quickstart-xgboost.html) for a detailed explanation. +The figure above shows the centralised tested AUC performance over FL rounds on 4 experimental settings. +One can see that all settings obtain stable performance boost over FL rounds (especially noticeable at the start of training). +As expected, uniform client distribution shows higher AUC values (beyond 83% at the end) than square/exponential setup. +Feel free to explore more interesting experiments by yourself! diff --git a/examples/xgboost-comprehensive/_static/xgboost_flower_auc.png b/examples/xgboost-comprehensive/_static/xgboost_flower_auc.png new file mode 100644 index 0000000000000000000000000000000000000000..e6a4bfb83250cb280b435b364adb4dd2fed0f414 GIT binary patch literal 68480 zcmeEuWl$Y$(K|w*Gew39|g@Ss!4+Zr`8vzdZ%?0Tw zHSosoCZ+AB=4k2WY2so5rC{RbWasE+XKhOEVd3Ix?dZVC!pFkSME=>$&B;}OmDT>g zPhfF$v0|Mr!x92M1kp)W#}x_+-2`$#{}wH>hJuEI`Y0)`?v-`C>gkuf`}}$_uR}oV zEB=n$1nU&b)0}SnhS%s-ZGS-gC4T~l>fed z1hM@4%@!8+q}0DB_yxUbj`;V5t}KR!`OoVD8fpJOeiyg&WFg_LPN08>aGo}Te0|mA`W5irb||> zXJf@T{bhD5pG-M}pZ3qyx*7?!zsobpMP&ayZRd@K-h@m2<~$f-kvrHud0r7zTP~V0 zaSpmS11(iAAH(Rr1-|Co;Vh581ni}0``YR88u)$t&po0ucwi^$d_Q<*aKX-Qng1() zFJhbdWonM>rzUht5Mln@`c^!m+IEE%>Thlgtw%$-Zw)$jvCX*zoX~SnWtLJ5)*?O` z|5!vf@{f~C$vw7rT&vBdVD0e+CAs}#$*g%rJ%=w&C6d!ns|dyab^ z_qWSn7oWbHA8N|9j`wkB5f!+D&rAXKt9I6wEFf`B6@nsUhK>NfI^+ONS3O>Z|62XJ zjCWR%yvDvE9@fd;3mV;?wztNhej(fNo~{~;k;a$9qw2+(KdWm5z~zf;Io$9x(Kj^K zt?-gJMM^c-f3HpThQl`~Z1-8w|C4oO6ZuvZYvgKeQ5UVEa0TyX-SgmJ!~u zpSCr6Jo}ukvZk{v^@c~jMn(i7oPBdV6FO$->71}bzCqTw;3!UA3QL>E=|ujN=@{#o z*(WJqCCW~r(ax|o5C+G!?7!2U|IZ4zK(nKSP|75Lz|Dgy>Xk@4R(PLA7)Na9DE7J< zHf*FOTEzv)OLX_+P$FiUr&S#tKwZVbs;ae7Lv)`PFs^<@AYD=kEaWQYzqrGlr(pzk zzp?*I{hce~*nDK6t7#n(7_PRAfM z)^N<{iW{+aq6B@NDKk~AiIN>-prkHMn<=9Nu0(I!&3081{9Mlc#>4)MD&(n3hl}Kc 
zZrDFIkws?oz;-R$z~{H%pY^GnXtMN3HbWXl9%P+xyzl@FeU^1>++hruo2yq}H|Edo zuZ7JYjiXX3O^I6^csf!R8o`a=$LeEK;);Ivz+BsihxE$*s5=i)DJpAUA0!Ce$PgFz zhL%B&b76YLhrDsU(hVR9RdUL4=>0_9eA(olAHPK|ub0)$sN*Vy;LBkoK{QCEeq7`B*r#ALb?{&Ul{ORZ-`kUsnPN5X{rvFrt1H{LYDZpGQ)`Kd|Jxl4SiW)1`Y7+&pPe?yYl}Y@8&R|p&lVTg(!Wl^y!#*|-l`|d z>qv)Q1Iyi0^v=3*%Y`-_c9(d9xgu*mTZg}+rFZJyN|a-;a=dAEs_W(JvOUa$9^z%K z@g3;h=i1*Oy+Tp}H!&pz1&h%%_QJwKC}YqvJtL#ppI-`)dkaOztMR$90Wc|{+iFb> z$1nNx?&+U$ZOg$SA@r=QwEX<(J3HokW0?b4{I2(XtN|+9Pk2Ut2}+{RGmj#4bO?cg zfr_~zV@vh6IxQZ|fe#}_+UwJy3GxoW#=jmIITD&Q-SDoumeeOro3 zi++b(w4n`+#gZ)+e@^O#dQgJ2J8eVE!mAUyiz6{AH_=q#sp8%9wz1Fdhpi-eq>A`^ zNw{}-xUxWyn4$>ih4J0k5XAA>u^?Esy<> zicaIU#nl5jSp2;{nC*7)f7(P3JP*5iJnP6U$_>b1F=*cKXjyD_XN(v5OLsI^zFj>) zx_ww$Y1tcrIXh0q>p(nj;7Ouo=uO?`dq?yBJxowp8Pl525p#~;<;OT;{_Zi}_0jQc zLH5iWyuyeGeLj>P@=B)oncx|l(7Eximn$_ZH0wP!HSY*8KGwC}-ZYj5EZX1NfCY%r zQnC&E(<;oW7P-KB8RKjyF3}|agpbeKARdAc1S3^$ZCsmuC1IziDU~<%3Q^q` zfCpYI6xUEV`k)}}8zc8~xMTxb19yTOj2RI74t$+oimt{#s`8>(}E zs=#|US*vw<%#xEzI%XqLNhIHPz5*K;x5n#YixB#GN`|cbrddTrMd*4`;{Nf`^{nl# z#%h9;$8MqXw@LxwD8>MT)!g9WUH6al|ITilQRz(<L`k}HEMfB?ySE3xMz{T z+)W(XEp6wotzgO%jc}CxzP`RPqYl!bkBKz1J~2b)rdz#{quGL~$&6Y&`+tRfZ_Urv z+U(X^o93Rqy}jSl(!O;MzjuB3dsNw0>3efHyKw~=Gb4KBWJ~>``0yw|3+7@Qc7R9- zZ^e!S3lAGiUxWL7gSjpR<9lAy+Ljth&xU#cfv@78cgTGX)?@uU4VEPAENR_D z02!gN2DfVhTAl>lHQ=FJ9=-w5{GTWD1mXp2S%rnOkB{vrsi=ija=D$yUsRNmazJ$B8Ez*|>QKK!j8##EY?G)j5)7!zFMV>GaCLqpG!1nr!U5^?9J z6opDLxPCe)12tno*QK-Tyoj#&)x3|QFYP^)7PU&!ri-DF*{XO6X=cO#?VvjxGE z`F-o|?zV9qlarGh8W}10{d+&lHeHViSHSZW#Vf2YHN__kQ8Aq(6fkBV$2X z*(F)Z)01E475dknsVdU=J&(N?`&BgSTu80sb`2h2Ls7)dea}yu{>=Z_wyuPo&G$N_ zaj*AufP0({zs6lydgl)M=HP~Q9Mh*iz?yHc;Z(ma`WW}}R7bn3$g3l@yN0x%jx&;I zft%vmTon@&%n@gi`fWC1cwa?4;5r^TKhQ4X&i9VamsxmAI@U^hB92R1PDs=T*FS!U zEoP6AU{SI4W=?O#8=MVA%;yxe*5*6!vr?i_n#N}G25Q~!N};@O#|FrLLrL^%+CYK; zmJ-a{@qiivhx((i@GB}2&-qTGlFLRXw9oDFkd~3Z`?%o~ClExLnPUT=w#izrr{x;; zSzcc5I<9JSpO3S^V>!Z^;2htI{#y_bhqF4eFfbF4Lx2il$v-2pBC)V#n*Tsx=;Qpy 
zPqBZ;Q15!;>Ey5z_GEvI;;b{PNTwGc@W7V7YpzRIrOjdq9aY6zwD@+$rm_+ASYMo6I9U2GKkZB@SAS@<97WrSh)JYq3_fyK1bzgW0nnei``Oq2I}DI`kOq@qq*48 z(NUh)+{sO+Dw|n`{&=!V6T^SYUY41E+yFb<_~@YAPOvR@Ri}kN$rGXI@u}y}iKd`G z)!%8}R#wOxXBq8ka0?~@Msc^gRg-cqnNgD7F*$e30w=ssOIiWWx>{}Y5Q|MtPRDru z%X?|fGDK2qS_+PDy>xGLGIC}_n6 z84(eYSlB0N-q1Jp^|9l%^hz6m6i?63wsxRJJCQ5&5Y~FBB{E^rY3!5d2j?b(_u%)n z)Mm(foDDTMGnyQqIL3VqX?mWJ@gM zRV5CNjF5fQIHt4I+GBAfUk~sRl#2Ty5D!XYx<)t>?-&etFZF(kwTA4O@)46jxg+q8 zWJU;(V6l@l7+W=Od`)zEkT5<-ReGvptQH)l}iPzWs;$$gKMeVn0q+q~*v zHS=4=@A+n)$*4VRfGn`JpFPZ^%Crv&>P+bRFIy&~FPW;E8UdTJSouJ5wKh4 zmIRH|)q8_sk)iN8%n`)J#n=25i?B*>R$g^p9@k&L=j5Pu0)KP3%7ck{VGw5GRJTE= zr>KFVHL>vNQ@}9)1W>t|c!4+9#MWqkpliB@KNlJ3Mw;MfvCx^_4f)+>4O&eAZ#jct zSxrpTVmov8%zZ*kZRQ|a>7@fw5=qctCl7%S3+42jz;VZx=ej>{;Pa8uVv{Q^5N{Z- zz!P8qS@oTGT)jM=aoWx?LD48=^w!TC>Yc2VBu~W?^MANIU2S`OhlQ0c+rj(IacJR#hxIN0!~xD+v`MzG zi65|HZiuHpOj@UZ=sSc^rH39@AZ+<6?$q9yi=>C%=%tKWvQII0V>M$H9J|GrJgrev zV(7(E4@pW2G@ubpmTNMS1wOHx4a8AXQ-?Zl_nUy0om*DdZu-MfiTb-kk%|0Hys!4y zowp=;9M?vHI%WtsB3B)XT3d;WYF$2WZO4lm+MYel8wHL52dbEu7*tnR7d8%#imK|C z@ZOJTmaQIiQ6aFvAtmLOU=*>GooL&t-l)CI9@?iYYIjmcZao5Q?g*3YaCRn3{2l>R zDk1XL8$u?MPvXTrg2bzwTx}80MM#UeIZDMn{%cpyWtQJfCGauuKD{<7`}BK-6%2&J z&eX6p0;&IJ51d=j>1s=@*#O$!XnN0gYB@fa9R!}tSirU}10+u#!}g%nu)ZJpMwFE8bfxd3pBEn&Q)}b>7atdL+j5hh zuves4#c6wSyC)ST+9c-SrBUB^5>7^yryv^`R)-`pxb`rDBk*QhWQfVT9n6gV@ZWAp z5Mba92i+_CNW;@dL`QIippi^=shOWNfpf0VbSmQ)YppnaZ;xF`&FXd2`CVxMxMb(# z`%5Wz{QBSzky}ngM4bp|bMNzu%I&V#hx>H4faaZR?}=E{C=0Km&794{?C>eUR!|Cm zA`L*c9RIkq8A;N3&e2;rrycCLx5YeUvQiBFC%&9zDw zUupcK2$=1Lv&@pwzx$Exy=4R_Pfy=TpRrDth06-5?x#9IQ~*s-ktMF$B#K;$H#kxT z9`m@83`a^*H}Pc-2NRQD&MfSAF6JTLC)Gl$u?rxK|CPEfu+mnt!_{M~(`Ug3Ho8zz zuU^(kXYm`Lv~cEXN!z;YXVyuT5C_y=ya{Q^}+r_48&*FdgYr96RI zPzOmD&Ni)X(B1ka<9+Z}>C@^vTHEfLSeq-yCi{mPB#}-ih&WRUnD|?FEt%#FcauGHBYQE7E>Hf6OWVr2(WxUuUol#{eS?3<2+sGhQbBqW@(rb)n!EDC>;C=PLJBiGowtTaBMQ&axl9BGZJ696Gcapbd+AaUlMtM_TaCGGK+ zdo~Hz+2%+>&GDc2JmF7t2vb6^Q;~kH#=vc9X zfa0OCW+YjE?ZJfuWZ`lY3o@-Ow$eWwIN{;bP#*qrEi{+KR(<=NeXPJ*c876e9LV^w 
zngS)JQop8532Z!uj<>G7jB!>I9JP_HLeN?oz&ifb2YbSford#1)N9Xq`PkTD=T**< z=zjiP-o%3df}MCWcrm!M#Hj4!9|*f8+oGy%IQT-WBn~<$0@ZHz!nusN*b4J>zU!o} zPGu_@p{qD{p+=-YOp~V}MuQ*mV6t5elWW;RJ3?O! zvIf1h1YuppTWZH675ayEMz&q`wT8E? z6RVRmnlac5bD&0!&U3ayX3~^rsz}YLo^8!YVrJJaZ1W&?T!E^Fjo?o{lv65?h;nXG zigYyRGI=8>jAzNCud(^fWVrw-<;_2>O#AUo53dx#Qo?@M+XSr?&o$Q&`!0P_&n8f+ z5D#4gjxN&)kWq;R40?Yh+420^siNo3oGIIaGw7Kx{iSD4zFH8xkBcd$(~s$p%NA95 z?*wd%!7K|aRA^C+fg=>B6yvuQZ}=wrPv9oLoeTI@m$sspF#E~;2J04vLO6Tb(vg1= zNrSnaiEY2;t<_o4ib25I)tmA!*OB`Ys*)DTH!mvB)#Iy&HYQteKvz{2;8o?l7C?Mg$IkJT#-u6?$bwHX9U+*u}wo7XXjl?_zfHscT(nl=j43X7@ayc z9iEtCG}HhMim`l*xrVzmWbZm8aFk<_*)EC7bwxc1o5lEzS6gq|ezWLWVM`ypN4D~G zhllx3Ba3j~?ZWzO<$}4Wp-R*$F#9O(?bn45_)FGwJ%Z>~5bPR5C>z9`hoi^TK6t_- zJ5F~#j=AnF6mQ1g$EOcAQfg%YNVZ%>3jHOj@6jzXPR^$9t4(~`bz_#@a~>86l#_HY ze=)wPKYF^M)>St)CrR!XlyH)lV)e3`PLzb>KXb(Sc^caQ#W`TX!kX>u# zG@COA--1@8wZ@eRq-bE(8umklF!|!4z*Yb0R#}-3w zzyy5J>RQ2i>d5_vs+rPDMoUxaj1))G?sE{3gyT)Tx?OKK4~_Wgl{2)2{XlM=4R9aC zJ_|F%%-6Fv?m07{(NmIq1s+@$Uh9+ryqvgR!(l;f8&^)zA=EK$_BfgxlJmWXMpXM}ez1^0m*5R7o z%35)tldmM`u0@x)#Z#^bp?&+V-gpmW4AZ!~T^gR!V2oH98*ne@`Kes-YSsh{YCx{Wp9>b8v6v1uJE&=u2v z%U4&w{p^23Mef1Yi&lIug8$=@;cIk+8x$nM;YD2ka9dk`B|Rha$~~SwCVZrNqH3hs zB&2z-xbeMFt}6YeCqTyeHe)+e)n!~zT^MLleZsi&JHs{Us46WM9HF@T%nZAL=H1XD zOohbMDHen^<}?v;%H8Aj8E^8*4>JMnBY>p}NjOnLVh@PCeb<-E_#5C-H%l5s8OWoY zBe(H$`EoF`WsJFxDxc#vj=2YTf9_?uHa0K9064sfk9}INx7_u!7-K^t4*w@%x%gR4 zMBy{-D`s&`EsBP33&U({-sCW!abPepiwkpS0Ny+w=;Y@TzkCJVwT*SHgv{RRC}{Rm z$C-Uvr;phedJ{c|-oC-@Hg*AYsel}@M(NP=juPRvBPifK9qZ-jO@%#u%@fy#klDk8 zC2~pD8Ob7+)o0#@V>y$13oVoS@L812IWY|(Jj?{?rl1#YT+F8PpF2O#PhUJA&Nsrj zfi#a6V+mYK{y;Dk-&dZfURif3{qT@Ttp+GQj%xSr({nE5nq@dPK4`nHoP=dj<(S&) zz?Z^bc3+1XO^DXrGVCV^3m^Gv5x6+;5ogI8g-sn1R#}$->?#)-BnFXzU0NVJ1oC_U zG_^e@o3qCEe81Y&_|UH{$T{ z*`<1L{a6cCZ8GrP1i`{`19)cBg_4u5+gjHe3w1?vlet-5+?gN|P=B)Z2B zGmdPByd^syfAN%MlrL?VSl^mJ6?QbX=_tOAKU|k)PD)W$JvBY5CU$hcDVK4jXDao5 zxrYVqd8t^Aad&Yh9*Ob_{Z<>Z7##OQkNLdV;7K0Vqhf{6{z*ch(!jHI1~Q~FCKj5a3!bU3U4 
zxr8K_9l=$ZBzMYTz??moifis2`NR{*jgM{7F2@#TwzXoBsL=L>pNN`TqQR`XPAjPdYhCL?3(NCZMIu;8Vv=pvbm5T!P>vG6u z2HNuNqG|@h)ZsnpQedGXwTKSS(|hVws*h2qT}x|w@$)>A!v1)CV z2rGp9hp^~W^#|%d=|;rd^9(3AFS3^Mx1G8&%vrA5?RN{En)a1-=#ZdkPglSf(l->X zlhhh9`hC>qG1$IqW#*B_;mlKTwWk@R@b>4&EEZm%iR2sIWG_ENQG32}HQ?)swD3H<;ctGe zMwrBoTrelB2BNmcG6w#_Z#F|BK(Z2NF@o>%e6x_TnGt5<^@|7_#{?U1yX4-`OtyVQ zi%B!u=BwP%STu2rf-nqt&XFIZ6TvN84`rR z#kt#DB@?WtBx{+1-%mICb9hZbi_5>0r}0BA6dOp!ZpuV+4%0;q^$k7GQpQY8ol>i! zOdS$pWMjxVew&!ojd6i7ZaCYvHW6m?O_t|4EFKGfg zKUv-RWInwe9GjpOqHOKz5Mn2eBNSeZJ&Y|IS!^5m@?~~C*H*-1b^{lCM|d6^W3oNq zqk5NlwJ{=E|2yK9IR$HWn#7?$EoWeOOw2mzC+_sIHL#3i1|;stGSWMzR2`pltgYSg zJi6X$EnBhcKoa4pY;#DopwS9TApU6t$EDn2(jom`13h)PFzuCdw%Lo9Y)~?$RY@rT z^Ho4jZ|qKthd-#5{GNA>Xg0xbL`G(e`lO-mR-w7u^eLgZ?6@Cwxk2~hXbDHp^i2R_ z^!4Uh;Da6JH(%h{=KDm8qnva+GFy^*=td(drDGTF@|X0>>={mh5rO0&hjGSRdH zf|{HR1)I4ha?=< zn}YZu^Ghar9DIb5JtoTO&4;-TRRCvv1NgW!kSi&ps@8^x?aRrxeF}MnjA8fH%ASd7 z#V{^UaJC;M{TGuz>! z+G#LxxMGE_5>N8R#Gn*ag%{~I;EP5%EH#R@E0L= zXj|}jc{6DPz^1vX0wQ5+X@I{!n4vLTz99r~uGTWx>ekFOK5`|&4L;-te7y?tPZ2ar z>?%7eff0`!b0Z(|@_~DuF_aVBQvm-8Q3SKK#AAzbpbRR6{&^Jc5;56AMr&TFZC@7(;c1*766up@Tt#JU01R(I)uEqPvK@xhFzHUt*TP zn&=0F{qAqgnp&;ihctiPbiDcUuszPPA_Ll#e<20upYX+#bb4Rnj|aQx|J3;46$xvV zPOy?q7!XfP`_16OJ@4ZfH!R${Nf7JHb0UK5p_16^`=`fil`eWfbOWg!GI}>X&KDCV zAZs3eiKelPM}|ES5C5E&>9a1GHwzyw;OP_w-J-NpYqM@GvYc)R_Sxxq*WDDLOpFEJ zH_cpg^PzClX=8M}!jS8vmpyCXyD+rS71wyy2Yo5UmGN5NyUF zq*bmO@RzG1#x1Le)5+3ebGTF{)D7y8eP`JYl3?o9ikic}$qX3BM)ngE>z55*A@nZzFd8b~)?wvZajYL!-9H zIA7>|dE^=&l~=+L&5pd1KOY}@YqjlDU~AUUR1K$B9TsjJd+?RJohapNfcEw~h7Vpz zR;M14k_Eoy6NXkgfWS>R4&qkASu7%Bfu66aK&k_Rqxv#$^U>!3j(u8`GojX4U0N&G zCRCoHy(&BK$CR^Aw89;Gmj;$7EN3`)L^W{B^5yZv;?MLUkPQISPFA$hZB%u#jUQVrKw4d>H+myY3p z>Slt7+)aoR%KZy#?>C=3MK;C#s1|L6?AP8M6xss6rjWY9V!!YK+Np8H&zCRSLhz4HlVElkK=VJo&9bs+G8mGe`QpAx2czlPN~0dux7j)DDF zvxBAqqS9r7ffz*tTQEwe(UanJ_1Oq%m~p=2mh;isclzYXd{QX_Eyke-H_F6oFNt-{ zFUvwu>AT)B6!{OXTug7;{dCV@kq26Q?JPyfae8?TpDvP{#`Yo0E<*FJEv>LwP2H(w 
z1L`1n_1mD#@}QxugUz^Oor~GhOLwo}ZV*K10jZo3ln<1kXWr$Kiwfq7SrLGe@TRHG-V0wTV(4f(y+prl z=!JRu3$#kS>HYuqZ#ic5tNOs$c9E8)ycy*Q(Y1X_FURo2bZY`CS21&RB?R;%I9!b* zK<<|COHB+f0j+#RWBmJDx%>IJKsfX+6F)1S7o^E`H&p#x5Qbb@|q z(`~-=%PsNK{*O*MA_n%0ELr2a6D0Z<8e}x0qEC;v<=TOQkwLwDPbWIsijF<09efByKpSX%r5D*=+DFnRx8AI(5I$Vk~t1jxk^vTXpYmkr->6V&nfO#iy@g06_2 zeeS?(VhYJ5fm6?J=tpZFsI6j+NJwPVk=C@4vy0BYZR z*u`-5mH|+rYA$=)ch|3oUI%DcIKmXP63g^k4}VXjU)5UVJmvE!GX!Ub1hXA`M2!ZR zH;?WR@O&x@NYZ|$ufJC_z3IUvmdx!{L^&jpQJ3elY$0)VVtZ-xHHnSub+e%*e#VV! zhbQw#mMWU?o(K-|I({B7IS?7f8tlO;_P1++7~%*Ryh%W#0`xIf^P(fCWvjCdMvJ?j zNQrOB-xk0%sEqg}ZhCAFkCkitl%>>n)^Z3hD_U&x>%IxuZnT%wVmZQ$T-3+R8$sH*m*)FRU3O1JybcrRS|;Vt zAfCFJL!QI-PUhIDUHg$4=!gpAX70s9hJ76O&!OA$@`6$geIoDH19O3fym7Vm{xXOa zB7DmaC+(i7yqNX;-z!a70i_HeUX0+jovU&`(wv^2hKh)d-Mt)PwVEr_(RUr=j&(?t zmDzUUzn>U?J+I7u?LMf;TK2O({wc(FEFR`gI2Trag&Owf<3)e$PNlZ^L0mQtv=Fj> z#Z|68eB!F2*m^tQ4c+^*sdc|Aj&GwawLJKNma;2(J=A~+ymD>oa}8<23eOM5M7ROW zW5>{^#HzcVDAG&RZ|ztpo~yif=pkQ6TEAr7n;4gJ47H|*FF>B3D;#KHssZ8zPD>Pe zdU~jZ6OZBH;e|SDtV|wz)6MSCGQ+kE;5;}3P+3_Fz1puTw*%F)hK&v5;gOM#ii$LV zG8NK_26SNo7tu!Qf_FgY{5=hgsk#pCpaaMZ9}f6bK<|1!E_imHnEUh<(#6+iO6h-v z2AHOnT92#IzgB$TTsvFJwX|7tnvdyO;#*o#_2zC-!b5&Ib`%)zA|Yw|8@?KzGXV9t zwwY;_w2WD7Tz91mdQ|hxj;t4+2Y%P_M^;lnO%Qj+0DZ~WZZl6U^i5C3ZFkP(;yt0Y zlN&cvDZ&mvJ=j538x+N6h&gEXH>7DU9TKxCMM{D8bkU!oouR~!A3svCu*AV48FkWx@&#klX^h*8v=ClG0Di{mrpF$+x>-}umAX6PsxmC z^2Xoadj&i$uk#$`Mh}Mm7}e-sv3K?MzdLF~QDUv?GdJ1WNM8}$F??boakaPc^yZgZ z_mk*~O2+oPfIV$(&4AZo(k}P`F-&A+1;RQG#mb`yp`07a9=YHHwW7$HbMOR!QnhqR zA2PGWr2Di^MrrnOuttGKHk><}dD1iA$GGgY_-7)+N8C>}6BqRIlitP__-7sjUp;LP z$zmZ}IP<6q+W+P0;drG9D+sbt5~yU}81rN{J>Np43G$gdv49M;8qhFTwB0WG82JKP zQK%SF;S4~t+t;#c$HQsbiy-o_g9S*mKLHZ#Xgt;m+(|%2u>gp-0cCcjheGY*Xb|+< z`;8Fv#+ScGI)Ho`B4fV4sSH#A^a};A#F8wM%Nz|CfWycRDP1_oMageHFy<5ms=qea zY7;r9rl{5h2tP1tD&O}J0&E6%96NwSg)F)9=M7}eHMC671nyi+6;m@S(JX7ct^zbq zM5LNZPdE1fHTv`Rhksi8UK4i#*&&F0^*9Y<;f|?dI?Nq(d_7y}2CNk*I|i&@I?w+G z_yO1>!K2{+KqH%T6XK$+tsU!Rm^@^@MJxJ%IFia*R8{p2kc@KJrR_~bK>>;TVR^2j 
z+Ue{AiqIcPJ4+(bj(zY7=RT?3UYr`rsDbK-O)Vp}DpS}{+j*ZkT+wKLx`>TxGsGr` zH(gqWk^JY!p`Eq@P9;HodU01>DO?;#!#2lf*jp2w_nhKFG0+$J@1>U*qg!@!gHEnC zdO+kyVzF1y2r<9)51CBf_Zq-xzQXwjKpye-Hy~yOgm!b4M(CY|9}{ha`^`BZnv=cp z?2+MQMk2RoKu1&Kd1eTRQ1p*pT3dy_CnkPrmUwDXKVkRe&JRi)4BdH5T))b64CuM& z-d?j0DS*Fv!kPFZ_`{Q^p=`Iw#g%>))UkAFX%UY0<&Vy`wyP`x#k{`6p!+-~*CEpA z3mCoKyAKWG!F`$Wwa?lPvCj$O0V`Z}$e_6W?#}zF3_Pc#&Qm3mWj!Y^g|RJ%26EYE z(q^Lr#2poXu0VYN5eXEc5ljMFpy=h32WaQXJaKnYPfH^}FtWG^kVDdQM}Glb6dAhc z6C0q#-u(hkdOj}BixN_<2f7@jidWXw9t5Tj(nb7zLY6xKwF%VI)6@R4))BYd05GDn zbC&rs?mKkm4SLXf^8UOaA%7-H68+FkU-%gxF0PL2)14;V#k9Y#a}J;P_!X0h2~2!T z%H&Z6)&AuO`>dHeQqjoRJUpjP>jPiuZPt$!9bt97F`9~xhrRYx%^bgsXrIcgU~PKN znfx2Jfmq~Ds^z{OiA8%zGD+sO z0UbvsIu1+>c-_ouc>_nA#>&XZNFi#7Oisaop5e}ly@E<`1Cdi5U*!XPENX# z8fj`u>*(yV%{=)j!CHj(d?|=cB@l3ZZEy&$Q$h<5F_+;8eB5!&V&LCh6#?`?9Zko) zz%0myN0W1|nL`~b2f!wJd-Eyf_9v%@Ua1RMJl?4_IEwBL4K*SojR#^z_tnnbPv*nK zlgVsQry7@NA1x!gv#?v@!vPxQ2d{yTB1<0M)#_19&d7+mnR~X=h2F>g&Wp-(Y;@vd zkiOc4O?2MXRcKJnpZFTEyH1v4Im(tZL z)ciF5;w+Oaw=F-Nn|;`TVPWn_GC1<6jy3>rZ-%^ug*Gx$6*jghGhL^ycF@nfyd@qv zap<}35AvsfUsbzy&y;lZi7hUilluD9aLHu6!Q=QiU$R#8`37Fi!l14Ds)R>I8Q|W0BOV;uY+?2Ey;~kp{6ilI@9do!Nxu+_Sg`bZ$&SVC((z?(pIqoeYW5fosYgg`VvsN-SG5ufWb z2B1c>Td4X%o9p)-kO^eB(YfAk-PZGPC=-k$JXs|>hd z3;=?-&pH?k{!^~UOCz7=1Q2N{d%(nLy$uc^Lmsd^;_GWEbOuE2M!)=rT87SQ4rYGY zJhDW$#aOZUhuv>ny^2mvqZDDu*f3Z;-fGN`k+|B?3{7*Ae5M0c^~`YxU;G>6QZNmy_ioB84?i@<=fy#NS` z-@aj-9l1Ww{XT3j0cTCDdagqTZwQ2axEM9dI?XSyWkW+lov+4tIc;X>;LwN%0fFT| zBKhrD-f?f?)RDey&$S_73?O3GW*1PZnRb6gys?Sy=n#FW$Np0D-+Eg=CC!hxa%fDO zKrIDKUaHsoL&Kc+S|~Z`bu72O=GN1As$l*c8cI4@mN{7S&uQ8m8p5~W4iC#sU5w#o zDA{Y?o|?M9J2*ky#>9+uEL}EUzW9eH01D5KBjp6c3N(#%zVGMJoop zzonzVi{APDEQ8kqIvPUIVQYwZ3Vp;=Bu1<^zd8c{neR0_fXI)>!7>y1X9wS)+iWIW zb#)0P!cdBD8tdxnbSL;zfs9rI2(6(=eUGbkB_xUzvjMRv9BaoDhg=F1WRS!?3>mNk zAdB+`hFq$&nOYzAGFLp7?EyQ;`Zio$D?<3z5w(_qRM06fSvHm{_kL5(u&?i_bRD%Pg{!9{jkQHHQvN^z!r!pp8B zG?IVQj_VU2G9IGw1@dnK3KIm(#m?3{L@jFxfH|KEgO)>m8~b^d3F5i`Ns{bdY1nos zRH2kB5>%8bLPZq1O~cv=HNl{}_OXhxL)Srt 
zH2hU}E=bRA_ab%MrCHnKvraUq*lk?|P;Ujb{>C9 zU#})#dUKAKD!AxG7hV6TRBP(#Q*N6KNhSLL%^qS(Ae!fDq2#TIRQ8JhU8}R&vCF_U zod@be%ZFU&9I>@+8-(8$*4#kW$F!~j(&dSVV1ab(7BIeH`Ea=_^m4xqSPLZthfx!1 z#+Q%Z3#m6dAtPfLfNM6wr!&RF`ORHj+^KjjP5J;3EZ3cQ0LJu=R?!k}cg#0GuE_<& z-zBco7M1?E3+?|0vlMkZ(Se24t_#>mbeJE89q-pt8I_PAJDH<2c)QgrAb zEp_7w0T?~eU}3VZA^ZCE-YxjZ0--OhcYF~CU;=bj2;v_rk$`hMAH{kFMuC9wKh$;q zJCalb51smF0Pk;|Z}va|SiN9vo8S^s3G%Z_A=Y6Ei6~(fZokOv&34l$}+XFGfc%TjCHOr8=C1ey$9B zW)$>1-I>#Kv;c5Bqyz!#l(oQDe?Z<03&C@Zw@bFA*@C{ec4yNTz<5`_@7?K|e`I8& ze2!2$FicScRC=e)M=X{TISLaw!p;KOb}NkuGNj(UL@#8(wzioU;RHVc!S4q5YbJ-q zC*+rpfV{Pb%?oG_wzfW443}UN0aHmaXCgU(EgI?eerOWl2DDQgLra8V1Y9Y6Rm}KG zE!5h#tVRS-nDdcBfbQLg2`{zfw&~1(>J7mvZ`hQ-S>q2+uC5o&389CWwsU*nkqsVy zLv4k12a$bvB@=TyOL-K7EUIeh|5`Vll;Y!45ca=UPFp1#HbK<;(q~V9Z;pUj8c!&; zba}`u9^`OeI|6K4>NEe-fZxC`!>Zjo?8f@2Z5bFCPMdZ_0^Kyl0&V?hP*-}Q#LM(g zcQg}Fe=o*u4;hBXABFonhnBp*&k1&Ge_4_%(agUas2d)!zi4u%-o;#gy$oQ#jzPP8 zGVNg__b;OTcJ_c!gEkY8$b#ql;US1jO!6+}kqz>cE`}sLd2ij$*0Mqyp7VRJdt%9eGA>pJx2%@4La7MfW0vV1S&nXXA;M&hqlz^66+QWlKvo!hZSD1>% zEu_=7JHv|B%V9!r^J;RB9|@XFebZq*J$-SL`uB)R-R$O!MIy^Yy_BO>p0-yvQi2x* z=(!-=HV`9gd@$>rGNb}Inhz=PvAE}ySm*&pY4aE>KG$v@K`qDaUQQ1``}vdC&ONu8 z2IH@SunHdgoOI%4-`HuCbhH>$JqzFe{n;$x4H{we8K#x+0^#1nK%Q?JpF|)p#LE8) zm@FC>G;b3Dd~fV{v39~cX4b|65RZ>$@z;uY~+Sx^dXb&7cfll9(g*ykCgF=DL7-(S>af}^!$^k(v3Uh+Sa^4v!rnG=U&m}^}JFom20@?vJ^YsMfd+; z?X9Dt+}{4-p;bx+rNIQ12Bl#XMNkk#C6tyD>F!WaRKlR8EV{c(1XM}{q`Px~p=X$R zuN!^7&-*;<{jK%;<7F+*S!W$)?tSmL_TJYgt_Z6EyW zE+A=>BOXKK^4S%kepmb-Bwg_G27>OGtpU49N3yX*#u@CaA$pmuGgewqIaMwPZ*_h_ zwhlil-d)l}zeSGK?|BsO08f7Ff*4`E4>4lqlgtf3TjLqds;ZG(4f^r5U2sG;w{3Z(lWcvO1l)!@;hv>CEUaJ#no{7QF6xp* z{QO~T&5e&;>MSk(ds!`&`Bb7g^H(1EWi3<34UWYaCsbDMZl=l!ZCWtfF>Keq@!s!p zR)7;M@*&9VY?K>>X}q5ra?u$XaKn!`@Kl7wFNLPHgxn5kulIm<0~jCj=)RXqVP1BlWM;O}OZ?o+&rSRG zO#Nq(&L|Y$tgK5EC-mV6=>l4-_|%z~fM#@U3??N_3f!09w8YwdNzxdY>B(SHNAI0C z5+C;aVO9@BrL?8i^EthBzd3lc;i?T0>Q671QHgzLuE3!5ses;bvRrQsQ|5Ll)b-YM zY&W^{(+p1kBWOyU%Rye0`(xn=u*z@4=SD)VINfQR#D77tX41YT`FZ;@m#LXB?4JP> 
zL2W-?>5Bepzo6%b_X-k^$hc&x@rAfNZ??`|HslTxyriVI(Q>tZ0qP$mPDD3VH3rpc!ch5nP)d7Oxsuu9@}m@#$!1 z(~WDjUpv!hTYY0$8Z$1CF84)xHa+9T`;EJcQM%)I6-EG|UL!Zrz!;Tb&CH ztK8AMp!0ef`k7_2i9WE=abU%b^0<|s^VZjSOa|8^*CBf*4f=@=@45%dZzYgE-d8?+ z(FY$8nVs%1FQ0WsR13FgfnwYAw48X)f)`wN+_`*JHora#=aRW+o%o~ub*_#L_dE}} zPVnT%zJXzFi?XwPc(DjqG0xK!sg?cJurb-PbwuP^^f*zA>bJa)#|{C3SP0SMHPqR{V>D(C1yjqnTz5$W?Bc z_b5xk;U{TlsH%z7UR(6%()7DRZKCQmY69dKY6GX+_C3xp2tX!f8fmhI)d1FZn-5Cf z>SU$J@-YA+G#6H3?_7)IJ2{h8V#ya~cIdzftF@48DzVddvCZ`H0+S)MX-G`@Dq>#b z9WReB!{%RGE-hq>mnSI-OIe`=V?QK?%8#26Q9}qY!OZ9ky$)EK5 zU#3$?4+9$k-9!xg^8E{IQ{-~r)lD7Sw>9ew!TIwMt=6!6*f((mj5vr!uB`ldwCi3r zNNKRxY*_c4v0CoUePd$M(CHX^g;PDW!3}1@R|yBVP3Wxdqn0l$9i8uN8DdGU=X~sl ztLo|P^bDc>N3EBk$lNfSk%U00Xh?{zzbTi63y37M50GDe9* zC`6v?8%(4E{}x8u{%f$D7&CX-A`_#VXAv$5%V;J&JDT z6Y4iPmLKC9*Mr8E-wwgioU)+oQ?!nSoR>6{7@p(j8ZUh@<=3PF!>-SV8M{5;$O=DF zj1Wjj443kXWLCQw!X;#{)(E~Tdr_medekfb`DyBrPgW!M7d&!C)R41#$U|n`*y3o+z!(%JJ%`s1&+(7wh1G-1i{391Jj6I2vjx$YowP?`#(U3ymZe(c@?9j|eCl$|3@9CYILC3}^Oo-d2doEW^QB=^mOgStqkqw%w)ok~ zRL~pW%AYLU%ns7QKBS*KirT359n{hwSpB{-6*0Bi+uyV;uqS=$5RDnXtK;ewGKnR) zH_s@}(uyAzwq||ePgSI6YzfqdKBxX~897#l#I&!EaUh1;p3{(x@wg%W6rX{=q@_K(Ruy1Zg*>Z z8=sJ|j2dxa`}5*OvE?IvAT@H?NY@#l)uy#WDN<=>;Ml*tGpmocd&Gp#ocPha2I zW4OQuN`}3s-aKD>Q%){_`m=*V2JDnGz{7KfV#&6X5P`>CrCu8dIy$P~G(B=j+38}u z?*`*X2ARh3-wgF8(-N)A~a%Vq3(qz9AclbqX({phWIb zp>%`X8D?f2@x#E`tltQlapWyl9iDPoPGW_mW^Y=u(Zqg#VkPZeL=R8ao;DnwH) z_2!2%?^eNt+lJ9xl*{YMF-wnkMO!+6V$U6%NpbOvyg9HcAn_{0fG?9xSA^kt>o2K& zxN^Jz&fx-f4WSQxHm1VJeO4$OtDPn=nS)G(&s-gItHipaNAGM=^w+uu7PwiMI)tPz zGC9*u+Ou1k)2LRnwQ^Tqv*A?_B5-Xv+H zet4UsZTrJ<9jZCcPYxAX%1q-vTRKm#hgwxMc0X+HVAt1=X({)Q=`QAJc)rpb;1j%H zARJyLM22m24<6a0&m3?_H~1#VEKp7+5Kkez_aJX$f2?nTX4BzGe|&X(QcY}wx8=p8D|iPlgKRFwz&>*Jk;(JB z#W*rP8>%N7^r7=)Q3M)+_lHP6X7wSs+>y6t;8x~9@&MPcz=$TELaVrH*{ICQZ!r-9 zyuZO*?Y$4Wrxl#q*4kWKSa-2&5Y1$VC2p<${bh|I3Q5PL^<@7jrVMGWYLuc#FOkAVt@9XV!S@W_w?9Q!?2x?*YDfe zI};1q_`QKbJ#!pENlesWMCnRQN*w-!z(>C5KZ)P{l}8PT&s!$vd7`a_R?OP*YuQX% 
zbjGd{56Y(Y`+{E?nI$V7)6W8AaTlIUC2X;r#tAJ?mB<`x_x}EVj|GnpX$_Z@m7QRy zDBG^F;Tqj)PrI$Jy*K(sqSTE$xRmXwbkP;dzYUCnfGxdsV@)v}z32JRQ5J(OS9F}> z#cb{Zh~?D9h#+d zzwXIQg4x4#lFJs_sQQ_zpavV;iAaqtWc%)>k`|^Z{Z{z8<~iR9Iq{@5R4m8y!R3R~ z@l?h4rS}`tkL(`OMbopanuHDC?K!h{JNu|^m6|f_O3>^5FT&)|VR5FOFHgNT=o_9E zYu38$W0rGJfN)S3ORVMWi)`J$t6MliC|LScUg(U+g}{>!rMT)u%_KC*d1)SE?nmw4dA{%iqqF`_Vm&G);&tdia3JE zq<@E6$rZ(bb;Aj@8ZG%pFU@1UyN-R6Be)HSWg-}40VX?TB%x-Ft)^W#Huz^e(-6r- zqLzjW^7*$9;0RI3Fra2T^F(876-ys9n=saArmXU1p;^#4f47nNGigfoI02{N^LF)u z@E`n74&fUKV~ayM5G(d~l5L5R?KJ&sZ5ALlUedE1s%->7kv;Zd2&~){yvwWE$ zIy35V#X;daTfzliRQEDhrVGg*R!wW!?d&}exrNN*Ng0TIUb+sdB50irQOd;@TlB^z?lW5))(xitSMXJV}W}k{ViHBa{GUy2YmFi1&i>;o&S=HXdZ!{`P@4+0Lym z3ydjpI6kK6=swC&;_8_~+~Il(_DZxmnja-%pNeBnO@5QYBpW*=I}LKpIZp9g_X_QI z#HJ#N-Zw=g2htgVZitC!UMIoUJ#nzJIE3tHsP*T`khFlPMDkiAZMPvW zQLSMt)h>^d&BUL-yDAzchTOTTbMcAU`15_wSY#@`DX1t^75SaK`U1CPD;%R@-Cun* zgQy&plXxP|0j0|JvE`uW`^5HW(9C@hvyMgfW+l%gKY?*$Fm)@#U~x)yYq9?{k2`goNMJkFRrnPRPi}NY~-VS;X=uJSs|jt{=D>W^0o_zBM&X%*-g>zI}Lacf%Or z#S9c@wP76`9e2QI2Qh#uTWw@}85C4+B4Q*G;|6{xv0dD&)*xl$+!;SYH_bU&-b&eH zv&HyKbZ4hyBcjH5VbEnE(6_=7jnl6zejq`VF(j?q${t;Q`I)h8e>uKSy}I=wB{^Bd ztc4kD8G7(;eB(m)!E zFqf$)DL3!cz(I%6^X2bFNY07#SuGM7jZ^US$0;25r-NCCk#D|>R6LR|4-aP?A0J0B zO$7x7MkXdK@SVWew5&=a`gEtX3g2cP|GWtWlXubKA5b^xJB-2Q*@s(W}nm|1-Z zU$I;sg!rM{!Uz0LXc>`BgzU@}!i4QOc+-p$o|M$M5&ogN4|=|4 z#m*G>y1qzrhy|o3Rt8D(Rd)}ZLDr_j$A#P>1MqdBGF^(#WpLjlBkX;X(Ul5$9`zhp`=Qq;VYUB$*f5be@=sZ$g)m}9P1 z-V)=V7LgbT?5D}iDD=3ZC+8^V!XeD&rz@^Uu)k&w%^*)c#sB61Ky*QAX6yESSlC+Z zWBAZEw(uVtIk!1x6F$NgZUyzqu( z@7qg;;T~{KO2d|>+NrP--%(t1a2zS z^^)d1Jf3&f#Ui6r-*uLv$}*y{H7cB)#C z2G~hFSRbBoL@e4u|7hOUuWi@biwhp7`ASWjK&TBa8gD-_IURd2L8_>mpmN;y#>7OD zvRl*IAwB+>;ic>&=sH<~tKljc*B+)*J(0_T^Pj2pU(ov{fZm_>LGL6}@C*ZH$e(Pz zW-#gliiA)7hb#W)&Yw4`e@-j2GZ}`s5dvho0*<8d1xz(*`fg(s37*SktOHuvy6$ZT zs)flK8Jb1T3=cv&?bbZT7QU3;$(5u{03pPqgc-rccgru?OWnvD%@gBiF5H?C8!fme z&J?fzRlfO73wQg5A6TQcuRrZGh1_*rk4BPq(UpW%qwr3#~hN2u9)qMPJ;3 
z0mFGGY2{BD=g)MUJnQYuh2;v28MtX7EInwZp@{pR)I-$N6VtJ_X5eRq07^j2!*<pa|_K(OKKPS*(LLUYEp{zcD%zKYN!s#He9W9-vAVTtS)5lbg}qTpnr$ z^JNsm!K@ssOjnJU5;kt6(f8RgMUXOZvwAF>EeSS36BvVnwc=JYY>Y3nR~kS69HdwC z*HX$34kv+;F=SIBn*QnL=Gs&!g$Z+LyN z99^Z9XT~h1R;xD69B$&9R6SSmxyNg2km&Iu&H!B6Mq_yBa{n5ORj~daO~pJ^)YXqr zA9wS(8%V<(F{yT9AyX4rwHaXXI=iV@ecI_eEx(Ha67!E(#Qir=JyhY!1GI637R<5)y!Qb|aD`uFSipn$ak8OY9K|chkZX4c- zj*fn8V2%LP-NnS6c3+!sDN%oN_tssC6Nl+){YL_hl<;z(l-(rs47@2El|$m}B+u(_ zkWLd-PbA|dPz}$~2J1V%G`mxG@byw9E#7~_@eL`HV_Tx-u9&t^4(Vu7b2>PQi~$c4 zJeCnY6Nl?y-Bo(ofW$-t{~R_Bj#gl_G=mfK{lZ5_28yjuOG~q5bl;ShC)Lumr`Q_y z6a*HqDYe&y)nm4ct9* )YnL)oq(rg%9S|07UI6vTPpgn)>jlZizf8d()=hX8qJm zxmM})tGE1)YJa=!_V7O)Wx!;ue`sX%=kvE#r zjp#CyLeGYKOUnJr#U5V1`7ZJZi^D0VamUc#<{+ngwd0m$+F6J<1$AqxIFP`PR z4Y#GE!~MA9#Uf1fzKdU{*Q}^(j4vFyn^N2>+l(f1Yz|>ix6QP7Zmn2xCT!dtuP4)4 zOxQn2{Puz?%KQ69?k5q~1QvtRm#rIC)a+8>y(x({d7kCs1g4q|N0}U2hMFzk@Fon7 zMZJ!|@AB^r^*M@_;*^wkMn|dE`?PAevl1*0aaY7|qxX)4wf+$iA0)DP?J8pim~M$i zuXZXP`+Gv}XMwbV8Hh}F46&p>)tL^%?lU|IwSvNVrZ#R~3~jEy!7S~2go*MeyeVj9 zjOs~#NA%ww6^4sQ>uk_x1G~L<7f(^kV*InqU#$%Ic8Q2LOI_PxD=~78B3;R|JX0r5 zc%okQS!dek?^}LC-m)Y4nXgfoe)}&MSEa-DH8>9Lu(Tc9d(l^R@Ubhr%_N5YmMEM6 zk>Rf3!Z3d8DDud+njcTupK3M1C0q(NaQcR;URa#Tb6)Pki(laVw%TaDJ;g;~m{`Gv z+1NcTc#HM-e?In^6J8MvBw2`<`%h&*xBjT{IAA2MJAxN?khH6E6)(O!Kq6suZEYrX zWHr!2>!Dc5WwomP*i^{6@geA+Eb>hVz;7F6LsY%X>X3BhJ zz}F-W%BQ^Xo34ra`!p#S3vQt})lk_T@;?qds6_e_5yB?Vd*bz20<{6*AgaV@TzF=0 zP^SIXH>r&8JwDma~r>5=kdJQ*QxBKYLuQr&We7kYgp#hQYONIg3i0F z1mhm1YRtv~Vx`e#?9kQyr+Ps$N$GSPVZq?Mv>nPXTyA zAVr3r_|oHPNlHlVrzx=BfFA)Rm~FK8DlHiLumozq`f-;2;nV5#rzft9wbRm&O-xV; z&m^OA+lNAkOe>7G!=mp_`0fevB29Mq__#B1Q`~0oJ8*ao95^s)j>h%5zXpyr5kG?l zey&}&hL$i8Q#BtYDV^0aI>M8%dyJV9h0(X3e#FDjP9Oc`N0*B1zzC78vs#VhyV%pR zcDGkGdu}xLMo(HD_p$aPt2eAQ&$NMmxpn_VM95-zFCZQ($>7K7y{>N760MHGUVE?H zm`Dhi*tSP)I$z3Lzrz3s%HS{T^Xam>9~{iQzDwRXbZ8=Wa;ZloRaqOW3tr9e1q(wR3>LAN%r&a-Eik7C34m?g@mKe>a$We#f{v4nu9vL+3^AiA zcsUSe)6_oRv(6TN?CZFvP}xrsw^qrQxk22b_P2FAbNKhiQ!b~KbQdfY5vCG;rJh!9 
zYb=lpcEaNt0cYPvPPhCRnb$_W(SzpSRLTt70B2rNm;L^d`y zWw=K)@lKNl6@JP0r}|;A$%2~B6(YELoxj(0wKPLAQnOj+K%(5VH?Gi_+xy*fA@>*0 zi}&-SPG_u-|5Cnr^8k&Nm&SZ;?d6*{Z@$k{I91f_zQB)J$V}_W?eRL~r>=LsgSS$v zQ(*brvj#M!;t>=^s%hn!^~IVhi`!U@XZts>w=N%BNjmGV_;warX&!ritA>4a7!gnedOW8ht!2CU+xSma*YWHP}WKk4pj>d(q0Ckad0qO zUttgi$5>6|L76@eq2izyEmf3p)qVG?aN+OSrlQM?#2c=}%+Kr4gw@xSi8zF;ANJ!L z98uoa7@ax>F3o_`R$HNO? zDP?2ay(a`09lnmo+<%QpN~iKb%`S^5KZn~7r`rL^LxyJ5-WwyU=IXEpfK z>{yj037c!!@QDxFVt7@xuiuA5|!6vG}#V{yvlvNc7n~kLVV`h?n`f`V#NC zb1#6Mn*jA6!l#;}JUx399z0doS8ekQlCkAGP8hAk)^@RiU%-W@yx@i-1+-rak6p_p!)2^4@i; z{k8gno?=~;?>IG#&lmId1nVKjxew|5Sz=vd-m+D`r?qG$s#!GARDNs>uO*gw!i16y z5^7ClKfmwN-0$f!yTTn1_!`_P_eGzL755xoR~g;=P=#`$o@?yZ%dz=Jfi+x}9d#iU z>XD4uSg!l_jACW-+n4FbVB`Qlx~Tq4sy#5yPv6RW^5n_4Z{L0aEj`lm&4D)sxegXZ zty~88J9U|!P0XO~zd@$H7E5Q3u6TgksfJ$PoN$%mzlcNt7f? zu_OgDTTP9-o&SQs{(vhrbu5!g)zXR&`t@0vf#wS)Tp3E@_wRGM6~|Ghx!*P9DU!^x zbwnvY6tvi;sM(I+nj%WtexgC!aQ=w^w7<1O_isxs7d88FGBYc=xk*S$O74HnWF;1q zo+v907XN|8xv7LN`$j(FIP3~=(S?2tmebN1AX(c?O~Q~Yh2H_<0VJH(3z#wI?z&?JqJG*oK8eWKz1Eph@QP(W z_H{{()X_H zIf()O4{md-p=9){Jecz}t}k`*S^t~-Iu(az6<>2sfo=E%nz%zhdf(;Nm%N)UOUX_+ zI@HaIru;O=lc=7J(ooe+%IEV{%43sasNbZ!xh>x3Q3$iatl&b)enj}bQ`9Fl+oS7v ztLioX`ST~?`de6d5mOu^YA)h1`91Q{d#A&a8XoOYv{Wh)4P&g|mY!a!*{b%=o;9Os z2_V-lv%c0d{RlBV1Ahz@?6*DHdO<%-z6u3X#+w&CK+0%rPjC@?<@v)xYY`@PN+PKt zImOzQ*PVVZKMG%EVU$fZVj3u8R>hEwn#>;7G}9%oU1D-Q{k^7@z{Ie>)?JcYf7e6O zP7iZ7la!1MbYDmrnn74PPOy-k>F_X>SDxiE438km-TVFMCtj7*ZpKfV&E;ei*&jk3 zZgc084vN8yVbEJ)%j|L1-=>m7kX@#R1Seu7j%48_!2B@6C3M)u4lEt_9l$9b}qVkXHnY#)kr_9 z0-Wc9LaqIcK?19SCNI7o`^N~NWxLB)XQ0C1odU8GG-O%c%IR;(DJ z@`6+g0wein>u72=BBdoDee9|O7}C_#)EOQg0AN}w-LXjipavqt<0=HFz&*L<`g%B5z%x->_yGL4G(tFXxOWLLWX3dotF9|Vr|OP9-wT_{SR3;q*~SQ-MeOQ zFN~?g(m?^mzp`#NZ6O*soDfE>2cXhaJJhZ@f9~7|IH$lX+C#OTahD2r$R>hJ*)ar* zU0W-KWay8dKTn-M|1yM4{Lby$mm#I)OT>-j^;uL?OqQ7IY?o=>o4`jzPAY@wf)|9wyuK zx^vGu*?#!65K5$-9r3~OL#csNps0t%y#@&^xWNN*I1yu>VAK@Ku{c~+(b1EL_LW0{ z(S4S4S#jFkl;58N&7;?1YV{0w)I6pyCLa7_PEAey+18+4cM9sZu;SVWa~`v$dq4>= 
z1I*EXw-a?2h9|)s{5u7Gk*Gz&ho+X=`X-YD*+(h(R%K?VZfwI6!(rs`vE| zrUr2X|G%l8`js?4pWSsxG0&Yvl3GwOW|oBIUCt%=D`lZemX}9Y>0s{4e=1TwpO!V}v{G zV7(Aj7NHU~YcXsLW+`ebv#~)YOplA_kfny!|3$eW1hq1=(9t8=d&Z={ay%mAF_fp9 zsr1>G5C@Ia-_uvk$*KNI!$@(zZ=^x=jCZc1P%~P`XE`i`*$IEkL-G}g#8Juj=$vK} z8QImUp(gF(MQulMy|&Mn61d9j=Kd!NkqDr)Sw-Z<*J1`#dl1j3+u_PYQIy~cdkcb= zBk&IwI$;DqEl$IF@x{A$N0C}%P?+oo=-%$f!Gtt8b6fFPP<&TR8jBE)DwQz$AqKxyz-CpYr zyn@28eWqg%si8KRq!aqa$yo!QM&@Ajr;vGD~>e*viVkwQPO zj`3D1+x(vux${;!dMC@Pt+88MO(P>UjV`gR)1pM7!s|M(dNMbv3B|8TqDza~vjY0q zatr^t@~_+sbsJ>9Jp(_DMhvKQw>eG@oB1ZA6l6aK$Ipw?I4;$__5AysvZzrQ&N z;I63avhMW|R)rd$z5T5x%VhZI_yliMoz&ghEHRBO4cY$*Wj)rTuK+t5SVd%hXO$z} zGP=Z*t)cLdGo39Hef``QyH-}z%{`UHd7H0RB94kg>3QF`nOAd*9^Q~9-oEqB=O+_R z1T}k<^LqeEd~8ME-=s_(%xQXh`k6W^E)zJ`K&7-8q&Qh_t`Hr&M6GDM%vBKmmBHD5 z8{#AR#u`hPzkvV<3zM|^!)aGuXb*&%C z#$zo-O%TUq59*?dH^~FLzxCP}&mA4~OQWzfK-EvV%dTA^RGe+Eux$K(Ei+Qg;4mU! z1lfaRqS7%yQ753-#~3)XRKlhUFeNK+Gog4Da`C8Y%F-=6n$p=fiR)W(fzG73ZOu}W z4qW_(SI1B)(Lx`Gd*m4KBv5x}K?WqMcZYj4JZo{AOUm-{UnK`Lp|%ZNr-w$1o$pa3 z`YwC#c^kC9cs{T@tVyJv_3I&)oV3|oVa^su#B-qEgsZY9zFD>T&$`H#qS zcx-k<_&?=#{vl(0C_|Vc@^v-?Lxl zye@pu>i(=qj~LFp0}(G$tad6=2b8Qx7Q?WP6biSg%(R6hLcy^iS5dbWL)#UDs(yy? 
z|2D)ysl31V(lV#2xWBBxyd7LYfbR2{eD>S7Yw%f`TBsaXwvb&R0h_^dBM8ddfG9JI zng#!@vva9`6HUaTzcn`AfgKg@YPE1EBsnK2XudpHCWCDeMd26C(VuNcszV5(vj=`| zF4e2&72~tX8P1Y$Q!3JD`%bx(P#520TosM6*K^{@@eLav+MD2g;B)`-f9;lDF-2G) zQmMpAyag|ecTo2XpkHKjhpp{o$wPg6W9=+WIl{JaV<-ot5)Vy)royHKYD}Hy-?%pe z^%j|_nFOp>ie{kTCh#Kqz@x^tdWReNb*YG7TU&cvMR<)SxMfvgfi2~=_Jc>)b;ucn zih)6b)zM%Na8p^?{vO{kF{7{Kge_lU$=k&0iF4W-l9k@Z4J(^q8>&mBX5S@UldbQY zlTFaD5DT3NM+ueN(o{*OT~6G;GF+rjT)L8T%gN~?5_X~Z6%*8PqeV{mzc>B?a|q{( z1$-G`TZ(OMgi{`J(nii*&<`Px5t(^2gnO=R$l*eHyWsk<+Q&t|3(VD!f;2GuTN5FH zE|nt;&O9!d;Z{MPRca{D>0du~0&ldu>!c3Kqk-JsIWhY9qoTi8tb0r8RC-@dro%yHVJe1I=#0mKCx2>p^$Z8T z{!HqAKJ>#^oPBF~cQutWy7f{|>3#zEhy{q9$_%@UEBhPrHsXAjUFst#hD+{y+s;f_ zCp>igNTO-OG3^4%=9qBwyT2nbWBjq^=dJ2v^nO@E8MJ{H#+ zULm8LHDqr=>Qdt(Exo_yu6c!%mg~Mb)RVoj{*;Uj_W+d}W39{RsJt4N;WI0PS zj2AeDDmb{z8q=Hk_tJfY1R8c(Lr;GRy9~PXP)^;Nof(Duxfq6aOHW1v9lHA=WD;wtr0H!x zJ>Zfc-0F84Isfa<&F1O@`3KIOAkBaI{Z#6S{Gbo$>{kbLt1d7KzB>2RLqYxWVTF$o z8KkE&?xm9EpU>B)wmz@?cG)wQ8;d(_qGWNWJS7P;mDMfeJL+bjH#OFb*`*SFATp7`rMqQW@pkGvZTtez1(|tqsNHD#NyJ)fWz_! 
zPF|aOi_#m^%%|u08l6CQ*<)s!nf<14r7RDHs<(N+%PMzT$K_3e>7f-iw~Akvx^hI) z`Sw2-aM+K0c3dnh&$6U^Pcef{c3N*f>cnLt}p)xW7NH5s_Q!2|I*RpuAh(jkhHyvoj~Air%pVn^p3D{7(&lrD4 zyV9Y0RO5T2usnwF3}|4Gpx49Gvjr*xOPpgSpWZt5sMNU9ZhZcjqm$IF^S3`vwjfV@ z){KlJx~0xrRaSk;JIBa?W7Gz#i0tYl1AmghKLo*Abe zDJ<-Ng1v!{H+o+)oS`7=H5hlHy!^k$DS~|VQ)4sU;&u5_Uyfj44kzVJA)*YLEmSeO z*xBaSa>rN8#O4*3fBkms9ZIjujitht#r&Ad z$19;~>-*mNWG!fEMm+o1fd>x$QV22o{{6&4EeVR9johJ2T4Bhwd2A^X#^%;&il}ec zTP;*=tu(1!hvYps?|+~BdK~#&WSOhr>;}qhbE)a$LjAzl@q^<8IkMH{+Jy3~lcgCn zwoHHDrx$sh>&=DC!^?dof`JSwB;^JEj;q-A*V$Eh-aX{M9S*&a@uc-DPiS+poi{kY zaqM3|=)mw?iWEjCwj^$mj*c$fx|wlVz&s`rd>u>3RO!S-a+xOUxC~;8IpW6J)U9Og zpLzKo@jK}%R(mBQZGD&e-#DB_8XMs~ifYW{L&@x}kNqk-JzxEBCNXiCM)F7UlGxR{ zR<-9P|NeYYmDEYy*mNr74p&se#+AR#nUp*Lz>cnGR2uqnvDpFL!$wsT&m!AFd>|1X#b>7OQ zyYlFwNTuH2d8odDe8HVIjIW`R3VH3`FsH8Y8OO5=hYTMO2dD9l&69TVjsDl~K61n5 z(*5UCMqJ4JI&Zus;5<-Zi!Rz?e|2BK^+VuDjfk}+$H#i+Hf|F;?vrGGbaiq8yA@gw z;{R*ZC6Es)sxykX=+XZwdLZMmb%FiEzg$K|8*h< zEVajR_}NG6_Dm?dg+Uf7E-pH~OBAyLbOMDNww6^N+?ORU z^fNy*ix0~W7Bebb$>}k^$hxE#ap7ang1wW|8F6tY|2jj={0r7={h{aRb(Rz}x<7T! zo#|M$mWPp>FuAUfHjvSXgE^JUq2hex7@Ua7PA3}3(EQv;h`+w|#=y{IAu%}S$HtDON#pBx1~ly zp~GBUYEU$GczD>&&24ma^fvEPy3ZX-a2w|2G60 zb?Yn-Pax!R4*s?jLgw(N*$qa#_09EUYMN{6jF+y&WqjtfUg(YX^%<)TS-L*7z2n=X z)GD^Ixo&%x%hh}*=S0Baq_@%0O7P?m)=OWzySs&a|wZ3T2okxtoI=qb6&ui*Y8{z})m-3(vMgmj2-Q z>%L%W8=8uB=&pok1+vy(+)-zI7931dSyg4JX9qh-cSJ@8FC51LGgM?jh?@#R<1X zPb~#lD->#KY8#yVkGYXAx??x>syxf`@ORsTHEA^iG`|=K=H9^4q6qoQ?09`*}OB?3f-|#RI_)=fpjJ#aHbR-ibFZNQyo$qJL-0^6&4v zpCci%!=Q}Dud(6TULQ|alXMTps_OEZ`*)Fze1^dY3_yQ7$eCJcp9$`iW^}x~v zQZh-ejRSVxgfe2*Od?Ou+_%*r3sfQP+Nb-n?LV_}A=cZ+MoZYAVhwo?x01-miK(1W z6ujfn#fDJ!!4g|8pfD!e43)za|IqP>Sz?~DBwF7zg0jYsSMCkA18T6S*Ubu4(|2&A2eF1RCjFi# z6&?F1AdBX3--nbk)ZA2rgzV}@gs!aoy-SLoT9;qE%ZKyqr1J!uVQh0oT?nUBR_LuB z(kD)+Wm`Qs_`tx)hsS7UBtbjHwxp${wT!H93J3|kf%NlHX68qjbjG7BEI)t>IM=&`E8lV9(GBW;xn zY)2Ex4cBKoj>b_NnTveW>tc>n&Wv{wI_v4^?38pXUhEIGb_<#~kaD_jhZ0pTz7e&! 
zw7ML?&yI~5&AdNw^0BvH!rQl3pte0^$LjwsNXVmq1@A6pa9&7jXJTX1`Q&ag zoe0d*`?9a=3`gPC_raG)K#0YQiz<+~aL2DU_0{;+bc>WoJ5sv(VFg%FPU$^q*RbvM zY_>ED^M49er|#W`wUl}iU|(&h#SFOT*O3t}9v&W6F)Jl-5Oag;cNsWnylmHacg8+1 zFAvG0g2fLRti!>PIm0rQwhYhw*wF8>=(k zQ_t%7PK=5w@ZoFG9zhY2-;doLsjk9>G>k`$TS7v6FL)cs;r4UC&+?iV#h+=zN4rlp z0*~NOm8$sbSqax=_I4R8O>3-pQ+M|QAxQNA;$Hx5F^0F$R1i;t}I}b22e0Le{%G_H}G58!$cC<-DSSx?TYFeBolNmrl+?M@QF{V^Gs;3~Hqt%A%$W znBPgWD~n)r$mz3fbcrR{-=7RZq;8qRGYGm2oSbbswp9&qp;y+;8F)fi3qi?t3^;`e zcHkWi@7@u*jk;XIee?qyRZ^=$Ic0wWV*m2>Ybq7T5pORqB^8xPNP#R4RWMOeQ6c29 zlRY{Ba;eGfoPoyqSqM~VW=z3%$H{s|!YTouz!4$CT2$=J_IyST5+ESf!X^M<;ft3q z5kV8MfBea)=u)d{rrRF^8`;Fv^ke7TE1CfpXJ?HP9<_uPKz_PMWMyQ2Eo@vm=iq~G9v%-bvX(i`9p%-^MzVhR%>i3-iU>ZI>>bgCBFi2j z)(|&$_hTN`d^NWZ(9@gaw^!$JvvYGde0=0!G$z2@Ia9an9aLEFEO)ky?HNEyD5D7y z&|(;1c8;vTQ^+^>7h0Tq^)xDq2|QL2r(}fc19i%e3+R3aJP=Wabv!PD#(-aJJ$NG` z&WS+DMjVmPde8=>+B~+V_W*?yI0~K=6N?0nm&@|t4NcAC@O7C6HJq^h04=!@8l&ty z3>!1(QeN$`h}*Cm?oJ3nho72pn@>yWg_OyItVm#UhXHsAkGi3vVgf=i=8)?M$N=@@ zFH9Dtozof_bBaptyIDsLBgg#k3^QW2K=EeZYfrpTedupyKlcy$TnNUS~%K2Sx(} z1B%;_(9Hy$892c90HS(za>i|do625Dj%b%f@qeq8x%%e`fB;|i zjxWg1f3=AASWg|FolQmkl*UvVWBZYwhM1J8U%$4O0DgaO_W=3238~X=4iSiSvq-uZFl|&cT58B**hjgpzd{B>s;-ffz=As-xr~w+!rpqf^2$@{JduT z`JEj`-3rRuOKKFq4o|Y&Yr$jL>IYu`zB#l!tg?S_)L>alJeO@c*;m1OEIB(xG&ZrS zJD;pa(-z& ze+-2N9YWolR{$rV&MKZg(F-E6)j`Mhw-5mheD^dKZ)1Ne-MXbQf=C*4yhEg27QaIa zAqLOtA8*BcaCBT2WE zr8%?n43xzw-i5~3Z)f|LGsvvZVrM_u95B-@u}aE*-#jAX!V*&S5a3Fsi@?43-PO;-_~;#ofg0G z*YZBGvhi4KBB5i$Fn#jy@az<}y^jNJ40;Aj$Y^S^E!q{r<2ak0r<+w+vYfb4=eY(P zAXbvhS=@%h!sW0Jv0(l9@nfgB%gzFAA34>GO)>c2v+^UJJYj*#BQv*BM86RKM|Pfu z4sX3vc}3}RkNAB?dcV~H_oZsWIuM@NAo4l?04wW_j*bq?R9gGRd3(H%<%B9#8+h3@ z`@KQGP(GwSIlq}vW_`EbY}V6LC;Dl2tXsbnYFHC3CY+K@sme$Zp3K+e%#1+KX&T?( zYckBK#NjBylB`np^Zh*{`NJC`JRI~A0lk{5SFi4~J$?H0p;IdN%j%bG`zL2N>UmU_ zmcE?Lj*E+{;0=)eP_859Cj}9zT8yWKbN7-U1`)m z$uVqvsjSs?DcN*Opi|1F6E)pwH*YS(<4XSPY>H}X3wNrj?{pqpiBgC0u9M1%{#3Ji 
z_b#Sp@9^#{L@5pwKDIQFUU{Bj<*8Gr1ghUxly8&Zp8`>|tgb?sQx?|#c4Hy!!m{d@m}&jETS{zWre%UUZYIaBh3eSGMNz=8a^s6Oln8znj8 z2XG|}8uC%NGy`Uj2C04CR#{P@9C#{SH`r#rt6Rah{*8iKnZ}&>;IM17dplZ%!*`y9 zrjG~<3858#0FRZg*w?8*Hvy`6Z-;oo1s-X`{$eDGwD^1c9^=*-jZ?UektxFkn zHwSXWpmd1L_C$lRcwyNoBO)ql5ADmUHEVYN_1EpJ7KeM#i$vD+qQO*oeCvXGwmB>0 zNC(D#p+3lno=Zx@#Sr5GCAD*df=(%Bp^jBoTzoF|-%XZ$P{7rtQSRRQ_CWKmXx|Oa zx01icW!>HWoNe&bDh(W(-qF!}AYy?uHF5loarBsry^aDN9NS+Zx7}A#Q@4zj*=wk0 z7~FXLc+O;^MOCYJ>5k z8a%!J!NF=h*+2W7xNQ2rcA}F1#F8W5(Rt1mr+{jr)rGV$A7{)knwnm6Ieh2bS11;; zl?mzV>nAm`+w;V5o6${SXt+0E){=izJ$Z9<^+((l@ER*VS=$O;H#b8s1DyLpdQYF( z94MW;He}7o!STZWjD5>=NL04tIW*2tkYX4Jym|AcMnu`z!iKsN$!n{)yOSB}Ik=}8 z1?ywjP{&sX-L7bqIivr|sE*Uu*Y_7hcu>>rL!>`rIn96eR#_%W(bh2%|y=p_3+P$I} z9O)$GN$XCfcwhjEp|W`-&6nMikCqyJzeQ&R)r-$%{?|r*OF`td_gYJ)m>IXiC2sij z`8J$~#0woRY_~Zt;#b9tOZhd~q`C&D&-hIqueg2qQadApgALQ20FZtD{K|?ps7%eV zC8jz-aj&M9Rw-IYlsNo^tg1Ov{_V7SJwO$>tVPS@{C#VqGde4n#BeZ3PC44!A41!M zMg0M!b`WV%T^_q{xn6>!bc@-zV<$$D@t~g#MrT!h9nYDX@}#7sG(kSe zo-5v(qI0=cH?-!t{7WZ}^(~_& zlU72%d{$4zK6X0Zv(o0i3qt1~UUEowTlTNoW3Z`Cla{Qgx8Kt#X+ozKhZ zMUE*cF+e!qy_H>=k&*CkDPNhEqQTA0MdWQi?nWs-Qv>j!MVDfq`-?sTs5?YFPBR zdxI!$A-%Sk8c@Tqrf8t{BJ_!@v^K4*9=BQl(gYb5hMMz;G#58|kZW`@jO&5qe#FZs zvmXaEoQ5^AWTMn7oJxL>KHp@QbcOThlD>_iucMDjl4NwX->>1&`E}1-aL~74n6-)j z3{=i!${_Ud%atou_;DR?La0B)pvR)C5897)Z1l&>DuO6<@<(!tk%3_Z7F^1Og)3Hh zI06>@xn57USb5*x<+H25b*K7s*TuN04|jrw&g$6{p0Tb5{$e7DxVWYRE2s41M-IOe zSHC=yb{Z+B=`2*i68c`~ER1T#E+!U%UG<}_%?&4mnwnbQ!S>Ko!~V(uN8mHMi2iP; z$n}RV3$qELp&05tBHGbRadm`T#Y>9C3MIG#v#3K`e}B?cJdbvM5Hh=0U~R(Z1j^Sf zVM|MO^&_}ECPj!f7d^O4PJcSoye_l7dwFyU%b~=5AQ$(xuFijA!kVn|eEakUD#d2& zzS_HJ?Qdeb+Qi66?l8EE5g%3AD3naT5dL{c2>^r~o|;upxCMx+)@EzPxSnY&hu5t4 zWQFnLfB^f!aWUuFDu6_m;j|a=FP_E3{OIc=cPFV2jVs&Tu2u5uuUWTPD5kLQ<6EgI z=X;Ztu4e?-nb}nt^)zHVZTkhJ*7MW(A{S>^f0i)pZw1!ij*kJEa*wqT?%{Zk8Tqc4 z*U$7yF|LgJD)!`4WAGeD5V}RB=ybf!s@i0YK=ktIx}_VT;ZtO-U#8f`1jq!9DZvs^ zlrVJv1ds>0P%y9Fn%_!R{mPUV#y1VOPGH%Q{|NE#UtC=Gva^psVpp5fpIR`royBQ{ 
zid-aj~M6@aG6s}iwiSoojmW~&&cVrHRLz>_Hbct8pZOuP0TJ}dBm}V`I-Qx zIjQo&7+BwzKHf{N(?Czke&7H-IxZpWp4T|Ij^g&6oKx39bVpx^J@H&pC0YGFWY_4_ z2U?4sV3f_PS{xZMCvvFQK!A~xViuf&cO{7~ii)61&!;>CKLB9JhRXMd>!JwC74d-W zQ=Y-A24f&CE1N!-k2bjy!sAOZr_upCQaDi3{Y8;!`w~fE<+e@ z)41*_qrWUzBOl+4Ctq5gBq^HgI%6~#ehE`A*~XIc1fj^Y8>T+9PJH)n39YMFu*oHF zjswZ&-@Xo03|xelQ?JO;;Z_kc$H^#VvAF!YL9V5;>i(Qy?0HpRA6k^-m@~gvo12x&&&&>lF5oQa(~caDsW?dIk_dv>h_ zt`!)CI{Rl|pL$nr9ouYK099^p>uu(B_n5aXh349SdFq|Z{V#?7SA3>u4+WY&OetZw zT;VZLhfl4{?14R$CX-;H5D)e-&$}8#nmGKs^0xcGlP5UlP#M zHsE|Xkai(qLQ}fmC`3ZErzO@kc`6bVUu(pMHEy4epXh$rFs}YL*>9Ah-so=H)!R!T z`(SZbm%C-RJUljR-fUae9xE6A3Boy9tvG6$Mc z;%0DrP+XWT#8~t^90Z5_mOocZ4-zm|VXup~_4M(HLnjl%xoPLl=U00` zQu)SDf;m~72s}%bE5BSiqAg+kU0H7|+vZu#Y$pb{wbSc2TAkGzY}UP=hYjCIybw5k zxox(|i&48jtxa{jb|#AB76%I$s7zIJXF7)vVTL0{&!;1F#HepOQRMoE&a?cUO<7F^ z*_lfBa;g{@aAaDK?qNO2m{@03B{`{IUK!WcAV5jc8s;}^(*`ac9_&qJUTxT0^)W6j zi7*|67G@-DW-{Z2wd}H-Kjz1^)XqXl*>@*Bx?KEkUcGiLcwD23hKpwaVY0y=5 zAxGVX8Pch9-_MVWevkbz$o%<12JKkN#Oyx;4N1sVl#ExZny$4dx|&4Oe0kAcS5A>K zWIAvs+MVAF9~z(hWCiPn`|)2*Bix7D2i)^M7mOUQUp}!cnRc)L`D0)I{p=#-v!8V> zpb=H22~JUQsI1|UXjn;!3RjnR+q3K6<=I7krG{5jJe5P#mdNT0!LBY>uFqBG;Nyvz z3aQgvS5_AIhjWtvW0lB#!iZBwj?R3KyW4e0dVun2~(>rgf*bFM2WyfR%=AfpdClw zEk^mbkS*f>mAY^ts32*#5i8-$R%uMAdQtay+|JhdmTEdwpG7(IwozdF?~fPNg{A@L zO?{V(zJ537^RuOxX}+ZC$-=Y)GF<}vA9kG>fIHt7XL(LS@;nh|iTt*!8AuG_)EF3a z$G5V|$YK#*W2A%n$RfcVVkYn9`!~L0I^42~5c{4X97m31OVR0TVJd@gF~d&aR=g!S zi16n{K@zJeAILBzjV?9|sa@YU-4x+24B!9pj_e((UyZD8Nx!J7lJB}It1ABUN^X%? z@Lm#eUmlS2~b69Cnu`t72hIx3RuZ|X) z*s!oLgKsZbiHQ$4gLLBjV;57E_I>yM!v|he$f4|ZI)b2s6@+43WNCg}Guyn_C^&SJ zInz?;^%}S!=FLw57Fi1E} zz!POT6aooxnCQ776%BRm@P4KGKPL_PDe7G!B4OGE4&X}f#jw zKI^k~55_rJT3Y%+iM|gb?No}F7LZe<*hwDRx|Y$h5d9068>kos*6uui5Rn}Cu4`h? 
zBzdVMDdTrDV5FXWiF;)>bMiqJyYf7+|MMMA?*|nbBuV2>8{YW+yP5?UE!XLZ38hzN zt=iARI{VW=;z`EU>~s4$IGAjZsh&BH@R_F=OyfHNm`7p_=vdg+$^XP-a|GmMJVgPm$U`2bA4q@v|ri_i)A0o0S|#9plH zo`XqZ^U-0bU!y2qNl8hT2tJgU15yx2kpWEV2j3Kj8_EsXhE|ExA26Nyqs2*@)K1ft zek$bP=$JITGO`JG$9#e%=bj%||D1j=cq$pR_jOm0w!}cB6L#KG(d51>$xrX!y^Aja zMkJY_nhF5l)YR0YAu2F1aBObaLtGrJm)hv$q_R&=&Y%bcGS8Vx#6NXtc0CM%-!(I zws6)bsU9GgSEzqZE7swz#63pa0 zXbGw4Y62P|ORRHu=PQ%%Z?H+Bv;mH90WcUogVOSHT_^@XYg)p`M2b&n!AOkrQhADI z&KoS0oa>{zNpv2DHLN$d?>^(Z*&~_C+}s>-yR%!+F@{&vJEK?TH){#QB=`v56^##I z9X60`=+Gmar@l*+un1XuB1EC6Z&)xZ98GU)SLXhloSZ|5F2RaPD6B_>APVOwAer{g zPAHdGpk!UT`+2X?21nNuyAoAwW&@sQGe0k1ps8h^$oi0_(vV`9G;i6vrA97VUcP0G zvY)-SjKhV88)qxe%6t^h;!ssE);}kAja@V{N(7jl&qSoE9DVLgNqxPlg{39u#5W9d z3UlUoIq^xBx7s)$~Pz|9^MJCUy!sX)e96+yYZSf9Z4$5$2Em(B990OaY8!4nUM-C za7`ouBQV}`DA=N?_RZif+BJh*!*haIuHX^xZ@ZHDJSxaxnkvS5Zpwo>Yv6`yU828l zc(|5Hb9;L`?O^)A0*{=%R1wfb4!18EhBdqAp3xN8K4!L-=WY%wX4?Pt*tvOHOJAyA z6;(S|a-W@VUOi&F&yty(nz_bRkyIFeF={XF5)=&V=+I3|PoDw#+Ebs(Lcyuf1$!7r zy$!@60RNU#4irpmL_O_HakL;Icc6?sii*Tx;d*J`g!DnGnkEwsZRbhlBp>=6hu69R z;lj_8)jGw(!t%Pd%rVLSTjxulH57cW6R1VW%r!XvY=*`wQavDjfXj;!=NB+6FPyRw z*eg~5zaX~(uh7Zee-M79Bp=L>J5*IwK>>ctwuqj^Z!J0MJj;L4VF7i`%a4gycNH+? 
zm2KdwUfB+E#^h88y5A<+gn#QQ$3DwAKWtT)nD zObHXVd$8i(qiBhbyv9-LUvvteneKJCLA~zA5EfeIg{65xcf;$Bg1?)?#3$z*j(4?A zrAWRXji;$`Ng1i3+j6E%ZFd2#U#jOppvu1G2^#uljrxBkwmQuGv_d6{)M4t)Uk zREigszhgUZp`cvgv)rg?FTY!jasK(x^!Ru*?uJvDR*Mj1XJ08On59tn5|)(g>}+!g zR~)7WN{LAhv=)KgWNjie4i#;F;yksDJjw0eANni|4GrBU>!SeV{B{L1+utI$=f&EN z;T`|%@7EZZMAf@icpK+XlTE$WNl8hIUn7?$EyiB~5dTf&4uM4a}#R?@Erjr$WFna}<4joVJSJ|@nPXBn(^9pe@&a$M{tS2)c&j{*jzOv&s;{RXp zQUL|lAdDH6B|~|1ba<3g>jOEsoB)B>m57RpUJJ0W&L5Tks@`VBghnHOTi!?{pu)n9`;D15TvYq2pw=Qj-oMiCx+IlGs+x3)PQWWGF2W&`gjn3!uEb(x zW%ahCgeLhbzvuV)ChR88`Pry0D7BSjR}vLreg4y!JMYb3^!GpMx9GTadG_{P#9uDT z>f+d?>}~RuEducuY#qcPt0U<}a5#N_BCgXl?kD(hKlk{zpK$b^}ctH z=<<{;MKw;;Cv6fJ7YA1S0Y!{bli%jgrJ9B-v`>gsC6o|Js>$iDl{X6cQ46PYCCYoWBFtuioW^PHRG=a54D_1U-M zx{Emk1pwUM4RzHDj{2q^bihf{FUif*tTtS7KFp#N#a)+;T`cu@r_Tq**rfseg+TBj zo-!r2^XDf#bfja^>xq>BsP!I~j~qyi1xG%+J~+(Jy&Lk;LuS4E!XyQLiun!ZkGh)NQbL zb26M%x($>xzOX7h-hVv{x>E zTk|yxVw_{@gIIeQIcN_i+YwY7=d3+)z3)S$cz18aE{s#(#uu?cUd13$_j3DXVWe!N}!sp+tgQB^I?6)_MV0de>%`$ z6j=hbK5-gbjc97s<+gzCZ!D)F`-h;d6r9Lh)8|`}b-WxyCHOF6x(0`DWO-Op{s=td z@EvwRb;;4X>8+oeF(R07xNpQVgbbhrrJt6)LkP$ph--p75q_YvFg6C{z%oKj*kq4Z zAWF-XNF`q)eN#gEGlbsc#(A`>D^2&`RaQX-z6@~Q%}^~JZ2aa=dmjb{mf#Z-haIY) zN1v~s-#cgzAeg(q^^nB6sp5=zZKNrn>uJN?+G+OAQ1FRC@IqdI4Zjo?6mbt?*$YvC zMj!CK*4iXnWY#K1`a~e46h(zh6!C=P_TWsmLHnzw!<7Jge2(XQSuU>RC zLg94K2jU^qik5iMjW|pXoduJ2)NyJ*8o1Xu-3Pa6gt#0Q+pAbSB8;HVQ6;0PT@|RONq)!Bt*p?e?~_}j>XkH3JADeZJ^|nd8S9T$)OvM zQV+d2nn*VQ_2j1Nq#F)*=$NvB0+5*lQALaH01j~Q7^Pl^54)DxL6%;0Vm-+@1)bS} z&C!lOI6LkP447mF7NNC4qo;H|DwoI+2OHj_jh|^&=y*VE@dzbDVT5YvRCM(Su9Saqee~z$GUH5j64`-wSdMn9w<#w*_m(CUbwfi# zPWV=g+GZAx579~vGBBuLUbmBQFUK^Dsl9)khTt5Jc2$8 z2r!1r5&aHU0I$5fJTY*31U%#PLEx_iM0qU^{oG*Wm?vp(Ku-H5&&~|d$We3$QdXfW z+AAa!f?mBEQ4oM>Fs{A-{yiAy03U*`3m2v|Y&cvc;r`IYA|;y658gW-V@A=`+-YH9 z0SbTF2+56sqkIIlwkA=TYbO&E#-e8cP>6gN5f1l1emsp@Ql>?O&SDD_xEH?lGm?o7 z8Z!1#yB{ymM!z2{Y{ygL!=f4}s)6J)d@Tu(#hSzUK;!$21%V>TJrTP$Ig8tRF9Y&! 
zavUyz*c4C*laO^AIx>B*@`w3iibmeh?t~DSjJ7*>p24FfG%wDZ{TTMJ$QkoKJKJZN zHitmXNTPIz1ed5%&YgoL2XNm946>h)xFG|lh=-0NnkgXT)hSxS6yna%)C?0^_Ctl_ z_KoX}+k_yj*xR_|S=2>>zu?w@cag5@xr|I9aMXWX2o&^wA2Z z*SXx{SPk_FA?(kbS&Jo}@~Di(Kd{<>O;og?TvqRHLsDX+91d@YG~xi91I1LCojlpe zDLuDua3M$NJ@uQDi;12n28#3!595v=Z+g<#9_PP)XU2A+I}8hdsr`)w8to z?_+6u`OOaqEzSEKx)p2FY+)C`zveeUj&s?a0EX2%huQDSPEy?6KN!)^?u*&Ly?ua9!4pKdil z^M~l>Bwr&XVIUDTICK^KK{2ZEO~4N#P4H#^o3~cj2v;{|ygU8y)Oy{$C)7nK`hHRz z28QeewDwfz;NyFYF)ASUJ_iewpMCYsEz%6QJ75(pZMrcN3^}9+R@uMlomTq5@gfJD ze}0EnxCQ(Xb$hV&r1?};o!2FNf!^bzKvb%AUX{0{6wCLY)~e5yaFNagX}pDs$prqht2qpYO;OaH1)-QvATjJI4A{N8Z;e##~;mfQ<9hE56Da6 z!;l{p&re|Sw-0@9`UcVjbv=1I$vgqXeaD8j&YM}GpLuK;{(jS+h~4GzngcJd&Ssr< zGhO>sIr3k7VUr6XFK#c6vE#kSx~IGIE%MmZQWipfRtPw;xr4?b-~Hz+Hu57(KesXW zFnj*RM}_(L$TgD{yS6p`%^zgNsIx@s>*}rt%${dENDcvD;XGQoBbau4;T!222bc^9 zPG+9Z@C#c~H)ePeJx>Y@d;<{1k4#KFBWfm~%a|xr^3N4C?Bjpeta0L@{(80xY@Uh| zR0e7~8{hrSrr~pszIdH%S_0S448JJMgQUHkK9zBXt7a*<6 z%MyeoKt@b`41j{yz&UB@OPFOr%b#~Huc<-q)lRvvBgJ4QNbb@v#4BA#3nHtT)h9<+TBV-1RHdKP|7k{TgVKygpcE?VH)0}rFWGVC>C}(?tFQDoY=pC&%iKZkV zh(cj3FHH~O0~G0VJ2x83tO7t`VQc+DaUFRP@|)3Bo`mwj;^#Ys!A*51bakHz&DI0} z3ps>Hh`z7M_kbIKXMucQRg2vt`)q9kWq}AuD#g#1mY_X3jf0RPUkmbWI-4Mjm(%mS61n_z34FL&FpFR_H5!$7W(ckH$&mmS9(ixps?r-E+{S&LW$(GZqoVzmC z4+zGNS3qbN$caWV+G8ApjbWptJm>W4rdR;Kt7&PkV8)wDk;$|}+};p$@2^aoWfT-P zLWfG6BTCWdJ=Xt~Y3n%$fQ0TC_~9O`i$@Cttw%@+D@1aj)xTt!wXVq-LT-W%xvl26 z1Ze^IVTYCuZ9b@JmCEW2Lq&k$MB~oKmoCw#XZ9gqWbNI%uf5s7ZWpS*!B(R=$0u`; zdMDfQFf#IdMw09YcY3i4U1JzXE&-YavPaeS_d6zUu*!mz0LKv5(z@_IQ1>n-|CN&$h8{#6X)v4{-~#UA~7ho0q? 
z!vlQ3M`0$POiZ~vQw`VXn+vXcV)QVaE~s#_=uxrO@9mbo0IC`fNr&9)4dVRlv17;X zeu{~S8Lc`&{|KH5v@Jv<12-r`b7MaWp zm0%Eb78mz$1;3GeqRAU8Oa0k6ByhxBxqlMq*>+4QSS-g88sL-IDjuT%Y+s5wV^MpeVgY6vgllCFL>Q^lfR$ylXF) z1;NcNeB3Zc~po^l~DqzHbK}hmJxR|pL8Ev3Masq(K9#P}M1<{hzY;PQq$HOaz z3_Cf@Q>4X%qvr?aP>zhmvlgk}F;$Q}A2eY4ps`d^VXP|yWKYbCz#85f@PCPy-$T82 z>ubDnL&(P-@fv@9{sX-l8ynj#bT(EUA2+nP%w3C$iXs~dsU<{J51Gs65H2PZPfU?@ ztpJ+4cb8T}Dx+-hZ4PDT7x=cLwgX$CE6mQzQv@bOR6_vhgV8WUNT_Ej4C_pvwJiM2 z<40&{YWBwAqJ|or4WXUU(a~xkS#d7?1XDoVnq+sBhYD8q>0i;P7u2aV!PMvxuwD%A zH>f#0Tu2ht_X0;jpiw8|YN{c9#z**oLg^EzTK&jZP-hNUc0R@uA`wNU`a+_4@a%RE zPs0*%9eB?#s7Mf)h}XM$zIACq=c!!UKWY~uZUpT@O8eE%VWmj0Gb#KqrQ|z>l@CoC z-h9WxOQ}UV6>&%!D?ffbpVi#dWWubUQ<-)~4dk6b=|^;~_n30Ke*S!eC_BUY6sD!w z`XZ=&jV`W>HQV?2@#E66GOkyP7q=h2x;WMJ1#(|gwO5Hy`)(>Ka2zdKbc_-_a`mXM z?a?*ks=LvF)z&eN0OoB-3W*A7$gzqmNZMs|&`?~jy{G3ieBD?j9o^lHc|x{|EtS;o z54ryOjGsTT?e!M`X751}QF`i<0xR0dghHk)jO;7uRCKO@N(?R90nQMI) zv*AmW2z*OPcj|RA)}Sr>s2&Cdc~%>|tEt(9<2-5d7yNl7W4Cs?8f^vqO2qeuiRJ>L z*}!=1=dEuZYtV6-VFwcci++1=?_Hq5@a6Owox12yBtAcKf?!AtlEwJv=pbB{7VLEt?=(;R9w+E~EfRFf@Q__Ao3dGUq7wN2MwD4$?jqqD%7z-iU^azJTcsbf!9mrBo(vN3qdB9YIQk7%+C zqdG4vBO)ay>i|gag>3VuA!lY(>WeI$y)NVCIh5XZ050hv=ZAs1oe4d>4l5i0>jezs z#1U=4KL*r-Q=&eB# zq-s38DX6^pXo|@}Mlbfu&CQL{;ykUjh^NNqJWFJVV4cL_p16ZwaHaBbSj4AyBm}NqyUW=&2M(-0j94@DNP0X*)T4?QRkM2mC z{-)}lbjp#KTg$3qZ`frfq{oXTDr#&N@X2_R2HgW>`5EfT+fRS}tE=VOxbgX5uRJLU ziJQ5Y6HLalQR_!Cn_nP-kYCD^QFN{GmCE>Bko#)m%95{Q zKtUl}y;p6}ff9PPuQtKsQzj4!;9=y@078F#xL}(6sa@!@(nF9SgcOtMTZ6}<4yeDpg6_c9$8->Z>&!eYMf0D>d`6j z8`XN^#I_u2I!XW?Q0sb=4%ZqoE}@n8xNREh*u>pByiYL}YEqW?5_sgt(bs@X69eo? 
z(vUBpV}}UY%i#=eKmx|gZ&2liV(aqj2Myeu#^X5+C!`zxn`GPcs7W_Kcp|k>@!^}k z-l`d!0m42pMO6$V%jIr&bgU;TZW-EJrPaos-2n`|oV4ZmN*7Grv=tdN53lSwD)+E+ z*>U@$ePZzF{~oLb%Hp}BPnSzWLdAK%C`o(=bHl}9qxerkda2K;9N?ziaX2VqN%la2 z65Ekf1MJIAAiA*a7k#U~L)sX09I(7KcO6PCoK37Nxr4WcyE%w+QCK*ywr0u6$%&E; zCoJ?sC#_rt2SNJ>*0q`9DH44L2AvUf#nr&&*5qg#870oP;F6=pKI5jBS5AS+JbwN9 z5ge156_u}nfAdru&wr6Uz-UkCPV@?Cu=3i& zT!B9hmOn!GCr2!nVe`3JvU_Tv4a2Enz#pRVS^ua_pI`2^;K+`v&{ff#dHcgy2)xYx zp}X2{QYX$!`R2BU6>EJx1{G%r`=Py$iqe-}*$JFndDQ9G3n3jRI1Bt;M8|!z!GONE z^$Qx`1%^nic^F|6P#ZrVN*6K1jU5TgC-9L$1H8d$)&o-l+3q-(#erq<)Y&I~fqLxP zq(xbM)NYjnyuz@ehBW<8+incPjfyy?B4B zT+k~3nlC~@PtJ8%EVmq$l4qD~ECgTd*YD7kSvdz^DvF<%=-06w6;O4DwrT^lWaLbsCqN@!e9XEFJ~B+)mHFVhaLyzgA83PUn}m+;JQgPxUONc+ zL3Y*rw)9)KRuFiXf|aMzm8-mEI%gvla)PP`y4@FcZRrX*By_wKbne~tP$R?#UwiVZWGCtK;^=@_!gUlum#xMx9;4j zK>{K4Gnc13^0VAS4tgB(97}W#)=CKlGJ*32r!5p(r9fP$6vsU910o#y_EBKqdzMmQ zuzB<5+fvcTqpKM{MH+b@WxWdvC#L;uy#i43()i+3ztC#oCa~Dqb(Vk`(4Vy9tlNXg zV{a)%*0Pvy8pVudD=fBe&#t>;U32Gdjm(~7dwxrFJwq@2b%pO-S^9_k)U%;u$+s@& zoP3`Aql2;3sUR_1#U4HfqObu5Q;9YeTe=HwG6{%+APcZe6*v2ycTHPv;3`&qXRegq z%Yn(l>!VR*@&pE6CXlJ*8Itmqx(CA?nTmid0?|5&v(ntot^{^tqI^U%BxE~q!9c6* ziwt0!-Mg2$VRNWg#Jcc-@&MLOM3G)mFRXhNAZrB5^$!0K*M^RwX231-8bJ6dXw*Jw z7WKUe+eLTAs>*$*S^1KX9{;+hk5^a#e>-`JTRr1^AR``2Xx$Z}EkW$e`Ne&X=yX4n zmhv~7pk{XpsY(ZR@7T-6X8vpBhfGmRMR^Tf#dw@-^nc}4Et#gkm?o8wm8MxAzaJ-? 
zf{v`!oge`WNXsID75S=Z)*59VK1PFUB0Is95E~d;`b61{MD#$k;$Y7ia#;hu5@ox+ z4*Rwi)31O?a7FHCRQKNIJV7x2z}iqLBnYR0X&w8$Gp9R(%c?nX(?W%4&+Q$qt14Fi z1p0_?xTeN{zmGp3NeKDkv>CSG9Z6~ z$WZc;wXYI*WD$pUZTIA_b{|#41(IT6gYC$+fpl?jC$a&M#v7(%5i19g7w;nLl#VGL z$y)LZAoj0CIO4ePv%m)6QZ4{P5`5?iHPT9dVAjTgi&u37S{r}_SvmZgXO+xrw)d>F z;;m~vw`0eT$hxZM+TXj&37%P}g*a1y1bsgC1<0ZqknqvJdUZ3WAVL(DEn5an+*Jr| zb{@4{aZvB@5pHXV`ebMdu+r@PtA02(W&O7@!kNof=>JHN}`BX3)YWGmm3nawn@C z1%9*4?ErYdAW>1nxkEwj5s#{twGb0TqbtZwhioDunhS4!LPkNZw5oGZdjWG8^@432AX!a6voLU`E#P zbbId55n&mH^>|EKsTicx!;Kd}dXL~we9^g}gEs_>zs46iIy$E7m2D=8@R*W%nsWFe zA|kK?6K@PaT?9|2p+{{xa0X&+hox&H#0;3bq2YTCt`L2Tjf=IURw{}D=)!|A`5;<= zK%p2MVOf3zJ8C?QBlZ=e<=J$YkZJ%*q9n_rzaD|o)3FXXLHQmZk(=77C;$@SUyw?4 zBRcxZQ|)PN??Bw67}GRKAPDh=iA~7sAUCzb_XJ!MNN)*-JRI48Duyj};&}h}eyvU! zU5qwX~Drj#?XT)3eB7SSBDZsCF=G4?{w{ zAd7}YNB@)GJ||GKDuET;#PUQUY2j@$Xv#hWIEkoCwrq)epnDw)mQc~xfBm>)hzKMU zR1l8}soJ9OKvwEMah3v7T_KPq8;y5_!t@ZGXMCBz{q^hQWspf_aC{^3QoI06@uHrG z9KI$4+bMnwj@%nFjhMRqqVfOz^V4#IfoutgHNmLxpCFA zZ-+mxC~g{ku+!7KL`(kH*DCid6E(pCkt>(7i*mI3ITKk5v{UKsJ=W!o${^_=IF5iV z@S8Sm-Lhp1NxIwSEk#H{3~bX)m*w+LH*V;Z2lUo3XJzBH?$+I=ZMU?Js;{Mz9sAPO z-sUwoIeZ^tm|#N<>W>wF_?T6{#TMWEKkq(vZsIm|$d0=+a{HcA(y0Fl=2QVE_03<8 z-XSXR_O5&7&*Qwk&Fw6AeJrV?6WH|UXLu&Et2KFOG4>swzG?eCwl$SgjP1wS63V#@ zb#G@*!ZkJB#+4D}P>;zfU;S^y+!hqD=zZVyLov$IF$pP^;^!*u1&fxK23oBvnYCwj zdrzD=Qi|&HCasrK#t5AnU(>11#Jn!P^}S8TF3kn+o`A58X_Pdg#9Ckj$>hg0N8=yWIC` zf9SFGKc}FtdJ%f_7FVCGui%Q#ZYuMyTkmg3{+IizMKCFC-o9+hJ;#c=Vpw||%w`k% z$1C-&|IKUCn;lud`o=r`>&m7dMx@n-sAPnW%pKC_!j?@eVGTwao|l*=snh>D-W(SOEo^& zwjzx8HswG5#vJ(@QRk`c&_8+MS^w@R@RL1%KkF?k>r)aM16Kbuoro-3Z--y`)AkVT zim+IKN!nj0hSu>K&S={oE_SLL4;NIXpA#OrqR+iUbrDYWIZevzuP{Lf+|r~ngtI2IE)@aKmmK57kWSv3IVAh91uC@+72^r z+XkijR)EDX?UZk=&-9Eg;M!%BDmz=0)0adQ`6F|g+*S$7I_^Hls5+wojidOmQC=hp zO9YigkH-~g3d;X!us;--K_NPaI!qEKXcE8;Xy#36X+l(@=2Lih41sasYBBIetiX)n z-FbqcL3+4weblGZ^jI=wYM_IuSFQ|GDc{zl_glr*@E&Fq%s#x(e-X?E$FhtP5AH#_ z!|dRRNF)MA<6QX&-91>pzoqh!V4xw8>J+#eO1lOCy{8w6HyPzoohaYbO@VQr1G@RV_LhyPLFo#A!>G}dF 
zK&73%$G6yC89H9ixn`1fZP;NMH`vsorqE4sSiv1tzl>ZmBXA^Z0_Il0hc}RH_1ho$ zxdPqF|L;UQzz6~WSI`r+OEZZFG*0r+4)MH2X+Ru98~}g zaI(Ui!3>-#(!t5VAil1fVcWI@vDTkKK6mfm-=8)N;(!R8$^CXPF0}ppc@w%KlKu~U z%5HFcC%YHGX<#Jz3%&R2ii!%OZ~|>$#*qfzf%qX%VwS`#3i9$X=jrc9f?loxo)kHr4%OXiQIDpfxl+X$E?Dqj1$%`umfP+$j zdBv9pocs~$iEp?m1`&9Sho8IP6_LB?(cV(E4Gi4>ze}P6{|M*)!2}S10Q5oC5!QWt zoXc8FwSGsb$^UC2-K^wsHO>6%kKj;`AI?%qdJlFVDp6oJpWw(Sh1!k?XP#POT%PE! z_kq2Ys;#HTQ#-XA41g{s7l>z{m)mgO1^JTD2rd~8-ABH@Z=f_gdVORU+6p}kzd-CO zyN*UhN0V$h5Xr!VR|45Ha0SQ>pr$+2tWcf>*Ezwg>y0^uz)@dQw8{Xkg6B{EwRhji zl@Rh@+7dhy{H!yMDt4y3d-ZM>rb&_TQLEGlX%kZS`Z|yT(r1g0bd6cR#SZs;l3-?$X$ECN8T4-?i!A5r%ZY1}J0SjtE3XZJ zGFHQ*urQ*xS^}U;3M7z*pNKQznc8_Ee`FZ~v2gWMC^}^{KTrTZ&&7pi2?Qx8MkMr$ zkn&04&O)33g#9P~5i=r5V32W)kWgYHRmo!CxNk0#()rg=AW>MjK|N^>hY|rOTk`Go#ERH~x?&InE8_%JGu(BV zRg1)Wi4aI~MQ?`TCL#(1BF<$Q0Rn+onQ)~{Z3%|SAaI}1uj<49NDO%SxVtMRrJZP- zkD=v0ftBs-@kuVA`XI}{=1R8s2F>tKRuv*t)`9bpL znl-gE-M`O#QMZ;6q|`#A$x-y?FA=|h{Ru}N&y-EZ2Z;0!PJY?3 z_O@l`CXP}Yc%~G7@?!tcoY}Jsf;uJLBIr#>$O0C_^M^tKDkz?q8ptIV?Z7`Ww?+Cu z1$W-u6hnbBMFjZM;l?ZYRK2*i2{OV(HZ{3T(8S>KdVHsqUY2GwiX{n*&x)sZp?8x1Ilmeb`Z|3y}WRE&LzGlWCm& zrpioor3mKgCh0@qc%T=)33RKyQ*X|y2ET}^r#UaH;-ndfVit@c2+374aIUy7wFlo` zAIBI8(A3PctuT6%kK~pH5#S%9CPRE#k3b-fB6se)2oHA!fASZV9UE|PkUq1k?@6rd zjT$Mn<>>BA1@ZExQY1VQO)E=k#y_-{V-bfItHUo<#~b7S@L{Csom;p3W<=QanBSz< z_sjY*BKxJJul>EK{;853EFKmAJ!4ECs$vR>(SS^yWN_eobtpU2lSO(O2*wF=KrWBs zQhAQ>6!Q`$JCNk`2caQVe`TT;Uyc;AbqwMQ#b6u2!uWCayGVn~7>5SrrG6#z#vm2}pcwAKOVAGBVIh9v`2Urfd|jIbBSfOdV3kHJ5(` z?OdI#&2~B$;k1E;0_$Gi3d+J4cEy%0uwPhKh#xR}sHTp*F*ksf#Jr)@nYg3H3BDh^RNH}--_#vzO5A>guOo*7)GFVSEuPNuq!LdX#aK$S!X0#7HRbV$vK zd5v&ejK9i1bt5o`jrQK3_H;c;N#DFp@JI_H3BT9f2 zrLUdDjbT`Dr`~weZ`_+gZ)V0lUWwMxbE9!}lt;(~V;qetKExo74?yYs{8vkkmA*~R z3t$w1{hI8TA{@i8{65E}$fzgxUa_<&$a}?)uLXFO7mMn3hNh;Zm9_OVU@64A0pCwF z#~L7u3e$E&P21eiEqw2?LTi&+G1DDjd^@y0zvw%s7#l@tk1-BE!{~v zsPQ98`X?J^*2GPyc+MHuDGPi>8??LC4)4gNN_!VVByZaxXM?G2d2Guk1dQx1Aub&- z_DJ=(4sXvV8&N?)L2%paULzY=kIx{90~verkMjy3G&bTrqCt`eCuX}?d+}rs`w-d0 
z)J{l_UwdCHdhV6?AU!=@Xl>J&P*-Nd0nO~!Xj$QJ29^ll#No=uC`sDPMKJmq>Mm&P z50?RjTF%f{mo))~=lgyQg|3)sq9{ZQq&e^scu1DX2?-?{Or3L8M#ZVC^Rh$Jy`JA7 z4Z5wYv@|Ge`2`$kZu=7aJKNEsd3-`UeGfz{=;m!A*AM$0b}GP?kS1(a=N$yu zdljb`h1?GXKb-`(BU&Mnlmn04wW}%hz1HxsY&@7;gn2E6IvZVbC&Xi2K%5Wv&yy6{ z7r5Ap_+Ao4Cjr6`Ee3?85(N`_)vyHHtj@tPF%B5md~x31#ua!FlF~^|FFJY#2cKK< zuNdT$008}tj=egZ$N@6oQ(M;IeXL7OQN$^~F*L|o{WLmWurXfJiIUA<6Vlld2KV#wDl})|^+@PlxpKuHpnuXFqL&z9mYJM}z(NrzNP){ZfDfk6 zgqevAS3t1}5HOa(o(0KwjL9k*J)<}T1Lh@DZ_9jf`6peLj7zGjX=h6%n!Yy}c(Y^-L_{9!6~r(=Jee%6^BeGZ*5qlUK_%!qxv`xLZoqpn^Ph(> z42@U?_}ki?dd5q@Y!R)^XVxN;Ydh!#>pZqON>WXsvm@ea-{8rY0kGy82)Wo-+=}`N zMb1-ri_i`OxI9Jt0=_>o>$Te<=4`{-jK%2<&l2t?lZG~~2N9*jJ_-;4nzX*cwE4f*J7()_kbm2?>cKPQMOMqkk^UNw%5^t z^s(;!vQknWsaxzM(5M`}G-u9L%!4OMbBU0g6HWaNM}Bk<>2_}?oJ{74{|0(0vt7FH z?VUVfjJPyhbaQEotA58L-(;Fd4>nH%%qE`suZU;7Enr&Qnn6(EFzM_=`2~8;^uk|H z{sZpa`TwGte5Za=9KQ^N)%FH- zB2BtB=b~U>>b`s}o2L4vC*?&9xwiu5|G8kuvr##D@c049_@Nyj0Xti_euNDZeFIUq zg35!etrSe?R$*I{gwH!*=E3mvQXIzX<*yM^2fCbr6FY8NBVZVOAqAt5E?6SQF^9jO z&8-)y>N_Hd4s;$NG8B`h>__;hRJfCC8$_(cgGj-m)Wht|XbXL$CO`n1*N7v;n?^7i zIJa>?EX$yTKZ#a=^SLRW%}M0jUmoVfFL<$Qx-%qIg{FssZr~|?j^q^j^AMbK%U?j+ zwo0`O0xXZ*Ey0-rL@9Z${pSV(A&@+e9IHU&v%2(fe;uI6EF(kKSkX)(%bxBh0J>B z{dn7hG0Gi(D7H3#0dUBqZfOR#xJOjZ+cPk+*m@=sEV{s|)mw z7Y`z4QX15aoSYnhu-{>iOuxe(ak>;GmNSLy-v%{GVj&Gwm z)&vv$XyEU#c!6>VY_OF^8o27^ij!)!pD4}D;J-frlXFX3cTNXe2r|?do+Tg?d{$j& zCJ}w}w<3}PVMR>JDakMWlTh0}Mx#?EvQ6IT&*MnA;1va5yj!)wbH;7AaIgF?kgUBe z1;P&=jLFOVdxsG)b4WXTdM?BskOt!>IVsOIycUnS&(@-KyV_9te#OnWu3^QQw_VrR zW4RvKU!Ma$pf!*`6M9rw81%7b$7r9QNR&{WWUa{GzxW6{({~$T2!t`G0tRFqJoV?2 zCjV-eXG}zAEUk&vT6@hZ^_4)t-2O7ZuShu|Nd)I#WtL%Qe8N+uNYnjoVp^Y-gdk#Q zmDII%i~vm|tC4oqDh}0~+2{?y?P8-`LIe+4F{0BWBjk1$y_7qDyhNzDNys<{1c?8- z{9whpENFV*C!_=1vK;NYm0L6NlOT;(NK8Xu(eEq@k*RN2Ks)*+@%&xkSQF#$36}Tq z>JzpdYG}!+QzuW7K9B2e_-jx>2&BM4WEk&#hw**{mB#cu=0-vUn{gXATc zS%7Qppx`XhFuZtty6xfdwAug%2CCa;McmHY@(wmsA~bw1PyDZ+Z1UXAugKOi#l>OXKgg_sGQ` zAeE_zyP-^z9@XUcx2)4aef=*I3>c`eT>p~_1971mUE}a{Xlo@V?|)k(J~zOJeNfQ~VWS!l~ 
zV3Ak(VtdkFNowv;$%?M6$G<&U(UXb7MomFBifzi>KVLUW2;>P&V~w~S$W7`|s;m(s zAU1rMVuRR) zIH72e!DuL25b573yw`>kH@<)17URPhb>-gC}Ad+$@|B-WILD{Tz~Vsh8MC4T;` zwU7K}tuOI^D_t`mRH^RMU3NJ+IS-=uo8bcfKRy~2XnQ@xGxuuwn2u*=Kjpuz^r$oY zN+azG!djHJ_W)xWQX_}X(?y&0d;Lvk_GOwB&eplqz2hj-CPNg9^qaZ9V~Cp_|LDA? z5{v*(_joAOYMaAPg_JI$twv+F;~D2GcTCUfV+RPThSv?#gt=4dWI01(zF9<@Ru>4tSUxTt@|OF~{e9gCE@{Gy`y z2*+z0U>z>nImg@B)p+_j<*;wT~TKJl(Oed-MFTa%f)VB zvGNc|aG_Ylf6?#!t+2158H^cowA6?7l%_Z_Q~*;7W|Xj+%REEhicp`W-Gq(M%U-CU=aYK@oPX_f7i zT`0qy4$jy(^vKUWXTENfeYlquC~sNO{abrQ5HdwC>>r@#rm@3l-NbZFUcWYTOmt= z0#5iXt7y@x1{FQY<0Z{eg~-E`2SS|EWNkIHijYF>?4P+mGaxZ3=^NNtzcdako9pLi z&hzLRPxWm8N5(ac4Vnbv!m)@z3<^&yM57p{N?{r)KIB1b*J`3!_R9B+)?%?;p)8TQ z+TP~@*9Ep6^4nuI`b?rXlb=puX(%>GU}NGxwk9Wg0zF`u41UjZs`VxTNxtH?N_V+W zXa)V^>LAIZdM@8_Zi0FkfehCX5r=RgvZW*R^IHbkkTXg71$K}Y-3Wyk0aG;anDKOt zE?SX8lT$(L3qTMCYWHTEDAeQORm5O4PPEW^S}peqss2gAXt!m*-p_|P@k31zTh>&ZGwO4jJQa~bGzMk2 zl?IA+8@eAFi&}shVE{fJ2TZ8aR~N84oD#ShL=jyi|5Dp(Nbxbn=^I8BJ?` zk5PMM?8;38TbLA=9@^$r&b#}Y(&XqpCFq;#^DR)WCY=?)h$f@d831cYg=i+6f5WzJgWgui(ZC$3$1!M=|Q1_#)rQ$efE55y>GruTovn7G2ky4=?#BS#AMscd=q> zW;Rc3gA>387~O>tyTtfT`$-}HmVNB9CjhXPV31-=R36Kas*?dc_ue)d@ZV7`+(m0t^Wb-P7Pm_a2>uzO2b*4 zV{y3gsx4*$8O8?OZ!Y(1RO!W8IvA&w#;qq`BvGCel47PCQpi7gbbA~n$yaT4STqUX zeG)WK+TX3;fG#)6^JLcg-H2yr1%-qxj&9WJHDm`y+KiXi)unyJBRK_PzHra!cCD(& zoIqudPw78R*cB2k{fT4ATVxU-eyBh8w#u0aSWT~(qc;R$mRJpKV$??HE_F~C9hitr z82O*ny7>C~?hUz`o1*u__>qcj!QmNkysoN-1{aDa=**nf@2|MppVazCIMaB>9w`eU z((TvY3iCS&P7cNBr6@y$lSGb+H4nW=$?(Ry99lr=1wB4+&GhAn4Nqsu4J>w1IpBCs z{w0e0OX1-cUsS(6gOYCY(2tC5C#gM$I%MZ}?I;_-cc#Ou=qOjDaOkK-{|~js|MMg2 zUtpDPopWT=<>~RDVKTy+@_K)&8AMduTbnYDeV@cuL*!fHGy&QSH*% zQc}6v7@}Q&H=$PuDehKyTdRU>h2&t>BZF_gq026R-LBtX z_7Ga|ry%Xaw}2Ff06PRm50^QYlnf8gHHkMG$ek0&=Fra`^-QTcFr z3A*-bHQA0%Ll4*iB{*L<{wl;Zq%Wj=QWPRu6IvprvCLw?0tFrBG z;iK3vk*INkzAkNKLlS>JJ$g$jPLdwB6=;Y|qHMS4eSkITSdta4wz6gluy!nEsUp%_ zZbNuWOw8mP33F9#uNvA=*c{*g$Wj)CFq*k~H<`#Of&B2M=3ZjavM|YccxW5eWZ->C 
z1ad@&TdA}S*fNwTNquOPN^u4Ju*7LNCz24-Xsf(4b?Oj>6H~k)(0umJ=H$AkcV|Hk;WzG*en@YvW(MsuNjiv#;_K}8=VFa4 zPch_SDVWcqT4}Cgp6-Q{G|BIPy zS;6WP7t+%F_nx6gdz7W3DSlE_)n#UTb@?zwIOpmrsttKwU(u>bwTd1b1G}}2jjXggH4Wqy18+zEMIP>9#XRHRs)OITE(|F^iuAGbt zUoMdZ;|FRZ?(5KB$4_43CHFYt^*wG$FavZ&I7)Q=zEywvu}qcXG4 zt<^W!I|}xd=B8=uf}ZElc`OSkvg+aF(exdS%BO5qv4fCx#4DN43xEvhL*H49Ews;k z1XWQFe|?LinJU22B=@Rf(J@a*czmJno_}xGP#N1xMnIg~!X{(v%@zJ9^J&5DWT_EC z@!8AwduFdx|N z4G{pAF#2I1@{EA9wbawf$~zZT)8m{s-YPHu-Ftm)ii?WQexe5wHD1*Ao%@ZAP+6>_ zE$Sk-(1<3P{{b0O&pd)yk*A~lfo&Z-TYO)Ur6>m1Y)lENs(t`j=_FnjMyA?HEz}%1 z2%9LNPQU-tMo3&e7CgTV6Ru*k4|7}%3Y!`ICh-QT6qGz3L%RNc15_32f-=8#9$sd# z2hLvp?owAd)&oC(R_-hd;WMjCFDDvC!+cDZ-7OC|3w%>Lwtz(9N}#Z5Shdo7Jc@!6 zV%mOrDJTHt@_$pbSym^pJ+al23$69a)3cKps(4MnFXj?ZVDaue`k) z9CR-4KtA|oaPVln&dG8Hv6X?Ggrh-`%9QO`$TC&;?x`OfY{RsWQ~y33OB@nLQssgm zh(3vagm^HOsG>uAA|<~GQ2u|lk7sq!yLd4tLzNNB8{K-Ig>$T*!f3T z5^$6z!Wk7KWU37ak16Mx<-{wCFoct@hvz&iwAeiU*NSn6H+0TJ@FZ+j*1l*0>EGPL z-2;0?IZwwqAB=~^sVOf1)9fBXj^gkV2KAgH1IXCqxWhTc8NATE@r4|vl!y_Wq<7H= z!@B@}6a+)k^_maY`9G90YDD EvaluateRes: # Start Flower client -fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient()) +fl.client.start_client(server_address="127.0.0.1:8080", client=XgbClient()) diff --git a/examples/xgboost-comprehensive/run.sh b/examples/xgboost-comprehensive/run.sh index 7cf65fa4d52d..7920f6bf5e55 100755 --- a/examples/xgboost-comprehensive/run.sh +++ b/examples/xgboost-comprehensive/run.sh @@ -3,12 +3,12 @@ set -e cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ echo "Starting server" -python server.py & +python3 server.py --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --centralised-eval & sleep 15 # Sleep for 15s to give the server enough time to start -for i in `seq 0 1`; do +for i in `seq 0 4`; do echo "Starting client $i" - python3 client.py --node-id=$i & + python3 client.py --node-id=$i --num-partitions=5 --partitioner-type=exponential & done # Enable CTRL+C to stop all background processes diff 
--git a/examples/xgboost-comprehensive/server.py b/examples/xgboost-comprehensive/server.py index e4c597ee17eb..3da7e8d9865c 100644 --- a/examples/xgboost-comprehensive/server.py +++ b/examples/xgboost-comprehensive/server.py @@ -8,7 +8,7 @@ from flwr_datasets import FederatedDataset from flwr.server.strategy import FedXgbBagging -from utils import server_args_parser +from utils import server_args_parser, BST_PARAMS from dataset import resplit, transform_dataset_to_dmatrix @@ -30,16 +30,7 @@ test_dmatrix = transform_dataset_to_dmatrix(test_set) # Hyper-parameters used for initialisation -params = { - "objective": "binary:logistic", - "eta": 0.1, # Learning rate - "max_depth": 8, - "eval_metric": "auc", - "nthread": 16, - "num_parallel_tree": 1, - "subsample": 1, - "tree_method": "hist", -} +params = BST_PARAMS def eval_config(rnd: int) -> Dict[str, str]: diff --git a/examples/xgboost-comprehensive/utils.py b/examples/xgboost-comprehensive/utils.py index 51c1a1b9604d..000def370752 100644 --- a/examples/xgboost-comprehensive/utils.py +++ b/examples/xgboost-comprehensive/utils.py @@ -1,6 +1,18 @@ import argparse +BST_PARAMS = { + "objective": "binary:logistic", + "eta": 0.1, # Learning rate + "max_depth": 8, + "eval_metric": "auc", + "nthread": 16, + "num_parallel_tree": 1, + "subsample": 1, + "tree_method": "hist", +} + + def client_args_parser(): """Parse arguments to define experimental settings on client side.""" parser = argparse.ArgumentParser() From 6d8c0feaab16e0e92a077f9cd3a789fa55cf99e5 Mon Sep 17 00:00:00 2001 From: Yan Gao Date: Sun, 19 Nov 2023 19:35:48 +0000 Subject: [PATCH 03/11] Update xgboost-quickstart readme (#2618) Co-authored-by: yan-gao-GY --- examples/xgboost-quickstart/README.md | 3 ++- examples/xgboost-quickstart/client.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/examples/xgboost-quickstart/README.md b/examples/xgboost-quickstart/README.md index 53cd37e18aa3..309f8f55b847 100644 --- 
a/examples/xgboost-quickstart/README.md +++ b/examples/xgboost-quickstart/README.md @@ -19,6 +19,7 @@ This will create a new directory called `xgboost-quickstart` containing the foll -- README.md <- Your're reading this right now -- server.py <- Defines the server-side logic -- client.py <- Defines the client-side logic +-- run.sh <- Commands to run experiments -- pyproject.toml <- Example dependencies (if you use Poetry) -- requirements.txt <- Example dependencies ``` @@ -79,7 +80,7 @@ You will see that XGBoost is starting a federated training. Alternatively, you can use `run.sh` to run the same experiment in a single terminal as follows: ```shell -bash run.sh +poetry run ./run.sh ``` Look at the [code](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) diff --git a/examples/xgboost-quickstart/client.py b/examples/xgboost-quickstart/client.py index ede4a2bba764..ed85e98a3ab2 100644 --- a/examples/xgboost-quickstart/client.py +++ b/examples/xgboost-quickstart/client.py @@ -88,7 +88,7 @@ def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core. 
# Define Flower client -class FlowerClient(fl.client.Client): +class XgbClient(fl.client.Client): def __init__(self): self.bst = None self.config = None @@ -170,4 +170,4 @@ def evaluate(self, ins: EvaluateIns) -> EvaluateRes: # Start Flower client -fl.client.start_client(server_address="127.0.0.1:8080", client=FlowerClient()) +fl.client.start_client(server_address="127.0.0.1:8080", client=XgbClient()) From 459c7fcff5ef671cb917e6e8da32acdd6ef55d82 Mon Sep 17 00:00:00 2001 From: Yan Gao Date: Mon, 20 Nov 2023 09:59:12 +0000 Subject: [PATCH 04/11] XGBoost tutorial (#2567) Co-authored-by: yan-gao-GY --- doc/source/tutorial-quickstart-xgboost.rst | 770 ++++++++++++++++++++- 1 file changed, 768 insertions(+), 2 deletions(-) diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst index be7094614c63..3de4e0ef81cb 100644 --- a/doc/source/tutorial-quickstart-xgboost.rst +++ b/doc/source/tutorial-quickstart-xgboost.rst @@ -7,6 +7,772 @@ Quickstart XGBoost .. meta:: :description: Check out this Federated Learning quickstart tutorial for using Flower with XGBoost to train classification models on trees. -Let's build a horizontal federated learning system using XGBoost and Flower! +Federated XGBoost +------------- -Please refer to the `full code example `_ to learn more. +EXtreme Gradient Boosting (**XGBoost**) is a robust and efficient implementation of gradient-boosted decision tree (**GBDT**), that maximises the computational boundaries for boosted tree methods. +It's primarily designed to enhance both the performance and computational speed of machine learning models. +In XGBoost, trees are constructed concurrently, unlike the sequential approach taken by GBDT. + +Often, for tabular data on medium-sized datasets with fewer than 10k training examples, XGBoost surpasses the results of deep learning techniques. + +Why federated XGBoost? 
+~~~~~~~~ + +Indeed, as the demand for data privacy and decentralized learning grows, there's an increasing requirement to implement federated XGBoost systems for specialised applications, like survival analysis and financial fraud detection. + +Federated learning ensures that raw data remains on the local device, making it an attractive approach for sensitive domains where data security and privacy are paramount. +Given the robustness and efficiency of XGBoost, combining it with federated learning offers a promising solution for these specific challenges. + +In this tutorial we will learn how to train a federated XGBoost model on HIGGS dataset using Flower and :code:`xgboost` package. +We use a simple example (`full code xgboost-quickstart `_) with two *clients* and one *server* +to demonstrate how federated XGBoost works, +and then we dive into a more complex example (`full code xgboost-comprehensive `_) to run various experiments. + + +Environment Setup +------------- + +First of all, it is recommended to create a virtual environment and run everything within a `virtualenv `_. + +We first need to install Flower and Flower Datasets. You can do this by running : + +.. code-block:: shell + + $ pip install flwr flwr-datasets + +Since we want to use :code:`xgboost` package to build up XGBoost trees, let's go ahead and install :code:`xgboost`: + +.. code-block:: shell + + $ pip install xgboost + + +Flower Client +------------- + +*Clients* are responsible for generating individual weight-updates for the model based on their local datasets. +Now that we have all our dependencies installed, let's run a simple distributed training with two clients and one server. + +In a file called :code:`client.py`, import xgboost, Flower, Flower Datasets and other related functions: + +.. 
code-block:: python + + import argparse + from typing import Union + from logging import INFO + from datasets import Dataset, DatasetDict + import xgboost as xgb + + import flwr as fl + from flwr_datasets import FederatedDataset + from flwr.common.logger import log + from flwr.common import ( + Code, + EvaluateIns, + EvaluateRes, + FitIns, + FitRes, + GetParametersIns, + GetParametersRes, + Parameters, + Status, + ) + from flwr_datasets.partitioner import IidPartitioner + +Dataset partition and hyper-parameter selection +~~~~~~~~ + +Prior to local training, we require loading the HIGGS dataset from Flower Datasets and conduct data partitioning for FL: + +.. code-block:: python + + # Load (HIGGS) dataset and conduct partitioning + partitioner = IidPartitioner(num_partitions=2) + fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) + + # Load the partition for this `node_id` + partition = fds.load_partition(idx=args.node_id, split="train") + partition.set_format("numpy") + +In this example, we split the dataset into two partitions with uniform distribution (:code:`IidPartitioner(num_partitions=2)`). +Then, we load the partition for the given client based on :code:`node_id`: + +.. code-block:: python + + # We first define arguments parser for user to specify the client/node ID. + parser = argparse.ArgumentParser() + parser.add_argument( + "--node-id", + default=0, + type=int, + help="Node ID used for the current client.", + ) + args = parser.parse_args() + + # Load the partition for this `node_id`. + partition = fds.load_partition(idx=args.node_id, split="train") + partition.set_format("numpy") + +After that, we do train/test splitting on the given partition (client's local data), and transform data format for :code:`xgboost` package. + +.. 
code-block:: python + + # Train/test splitting + train_data, valid_data, num_train, num_val = train_test_split( + partition, test_fraction=0.2, seed=42 + ) + + # Reformat data to DMatrix for xgboost + train_dmatrix = transform_dataset_to_dmatrix(train_data) + valid_dmatrix = transform_dataset_to_dmatrix(valid_data) + +The functions of :code:`train_test_split` and :code:`transform_dataset_to_dmatrix` are defined as below: + +.. code-block:: python + + # Define data partitioning related functions + def train_test_split(partition: Dataset, test_fraction: float, seed: int): + """Split the data into train and validation set given split rate.""" + train_test = partition.train_test_split(test_size=test_fraction, seed=seed) + partition_train = train_test["train"] + partition_test = train_test["test"] + + num_train = len(partition_train) + num_test = len(partition_test) + + return partition_train, partition_test, num_train, num_test + + + def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core.DMatrix: + """Transform dataset to DMatrix format for xgboost.""" + x = data["inputs"] + y = data["label"] + new_data = xgb.DMatrix(x, label=y) + return new_data + +Finally, we define the hyper-parameters used for XGBoost training. + +.. code-block:: python + + num_local_round = 1 + params = { + "objective": "binary:logistic", + "eta": 0.1, # lr + "max_depth": 8, + "eval_metric": "auc", + "nthread": 16, + "num_parallel_tree": 1, + "subsample": 1, + "tree_method": "hist", + } + +The :code:`num_local_round` represents the number of iterations for local tree boost. +We use CPU for the training in default. +One can shift it to GPU by setting :code:`tree_method` to :code:`gpu_hist`. +We use AUC as evaluation metric. + + +Flower client definition for XGBoost +~~~~~~~~ + +After loading the dataset we define the Flower client. +We follow the general rule to define :code:`XgbClient` class inherited from :code:`fl.client.Client`. + +.. 
code-block:: python + + class XgbClient(fl.client.Client): + def __init__(self): + self.bst = None + self.config = None + +The :code:`self.bst` is used to keep the Booster objects that remain consistent across rounds, +allowing them to store predictions from trees integrated in earlier rounds and maintain other essential data structures for training. + +Then, we override :code:`get_parameters`, :code:`fit` and :code:`evaluate` methods insides :code:`XgbClient` class as follows. + +.. code-block:: python + + def get_parameters(self, ins: GetParametersIns) -> GetParametersRes: + _ = (self, ins) + return GetParametersRes( + status=Status( + code=Code.OK, + message="OK", + ), + parameters=Parameters(tensor_type="", tensors=[]), + ) + +Unlike neural network training, XGBoost trees are not started from a specified random weights. +In this case, we do not use :code:`get_parameters` and :code:`set_parameters` to initialise model parameters for XGBoost. +As a result, let's return an empty tensor in :code:`get_parameters` when it is called by the server at the first round. + +.. 
code-block:: python + + def fit(self, ins: FitIns) -> FitRes: + if not self.bst: + # First round local training + log(INFO, "Start training at round 1") + bst = xgb.train( + params, + train_dmatrix, + num_boost_round=num_local_round, + evals=[(valid_dmatrix, "validate"), (train_dmatrix, "train")], + ) + self.config = bst.save_config() + self.bst = bst + else: + for item in ins.parameters.tensors: + global_model = bytearray(item) + + # Load global model into booster + self.bst.load_model(global_model) + self.bst.load_config(self.config) + + bst = self._local_boost() + + local_model = bst.save_raw("json") + local_model_bytes = bytes(local_model) + + return FitRes( + status=Status( + code=Code.OK, + message="OK", + ), + parameters=Parameters(tensor_type="", tensors=[local_model_bytes]), + num_examples=num_train, + metrics={}, + ) + +In :code:`fit`, at the first round, we call :code:`xgb.train()` to build up the first set of trees. +the returned Booster object and config are stored in :code:`self.bst` and :code:`self.config`, respectively. +From the second round, we load the global model sent from server to :code:`self.bst`, +and then update model weights on local training data with function :code:`local_boost` as follows: + +.. code-block:: python + + def _local_boost(self): + # Update trees based on local training data. + for i in range(num_local_round): + self.bst.update(train_dmatrix, self.bst.num_boosted_rounds()) + + # Extract the last N=num_local_round trees for sever aggregation + bst = self.bst[ + self.bst.num_boosted_rounds() + - num_local_round : self.bst.num_boosted_rounds() + ] + +Given :code:`num_local_round`, we update trees by calling :code:`self.bst.update` method. +After training, the last :code:`N=num_local_round` trees will be extracted to send to the server. + +.. 
code-block:: python + + def evaluate(self, ins: EvaluateIns) -> EvaluateRes: + eval_results = self.bst.eval_set( + evals=[(valid_dmatrix, "valid")], + iteration=self.bst.num_boosted_rounds() - 1, + ) + auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + + return EvaluateRes( + status=Status( + code=Code.OK, + message="OK", + ), + loss=0.0, + num_examples=num_val, + metrics={"AUC": auc}, + ) + +In :code:`evaluate`, we call :code:`self.bst.eval_set` function to conduct evaluation on valid set. +The AUC value will be returned. + +Now, we can create an instance of our class :code:`XgbClient` and add one line to actually run this client: + +.. code-block:: python + + fl.client.start_client(server_address="127.0.0.1:8080", client=XgbClient()) + +That's it for the client. We only have to implement :code:`Client`and call :code:`fl.client.start_client()`. +The string :code:`"[::]:8080"` tells the client which server to connect to. +In our case we can run the server and the client on the same machine, therefore we use +:code:`"[::]:8080"`. If we run a truly federated workload with the server and +clients running on different machines, all that needs to change is the +:code:`server_address` we point the client at. + + +Flower Server +------------- + +These updates are then sent to the *server* which will aggregate them to produce a better model. +Finally, the *server* sends this improved version of the model back to each *client* to finish a complete FL round. + +In a file named :code:`server.py`, import Flower and FedXgbBagging from :code:`flwr.server.strategy`. + +We first define a strategy for XGBoost bagging aggregation. + +.. 
code-block:: python + + # Define strategy + strategy = FedXgbBagging( + fraction_fit=1.0, + min_fit_clients=2, + min_available_clients=2, + min_evaluate_clients=2, + fraction_evaluate=1.0, + evaluate_metrics_aggregation_fn=evaluate_metrics_aggregation, + ) + + def evaluate_metrics_aggregation(eval_metrics): + """Return an aggregated metric (AUC) for evaluation.""" + total_num = sum([num for num, _ in eval_metrics]) + auc_aggregated = ( + sum([metrics["AUC"] * num for num, metrics in eval_metrics]) / total_num + ) + metrics_aggregated = {"AUC": auc_aggregated} + return metrics_aggregated + +We use two clients for this example. +An :code:`evaluate_metrics_aggregation` function is defined to collect and wighted average the AUC values from clients. + +Then, we start the server: + +.. code-block:: python + + # Start Flower server + fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=num_rounds), + strategy=strategy, + ) + +Tree-based bagging aggregation +~~~~~~~~ + +You must be curious about how bagging aggregation works. Let's look into the details. + +In file :code:`flwr.server.strategy.fedxgb_bagging.py`, we define :code:`FedXgbBagging` inherited from :code:`flwr.server.strategy.FedAvg`. +Then, we override the :code:`aggregate_fit`, :code:`aggregate_evaluate` and :code:`evaluate` methods as follows: + +.. 
code-block:: python + + import json + from logging import WARNING + from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast + + from flwr.common import EvaluateRes, FitRes, Parameters, Scalar + from flwr.common.logger import log + from flwr.server.client_proxy import ClientProxy + + from .fedavg import FedAvg + + + class FedXgbBagging(FedAvg): + """Configurable FedXgbBagging strategy implementation.""" + + def __init__( + self, + evaluate_function: Optional[ + Callable[ + [int, Parameters, Dict[str, Scalar]], + Optional[Tuple[float, Dict[str, Scalar]]], + ] + ] = None, + **kwargs: Any, + ): + self.evaluate_function = evaluate_function + self.global_model: Optional[bytes] = None + super().__init__(**kwargs) + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Aggregate fit results using bagging.""" + if not results: + return None, {} + # Do not aggregate if there are failures and failures are not accepted + if not self.accept_failures and failures: + return None, {} + + # Aggregate all the client trees + global_model = self.global_model + for _, fit_res in results: + update = fit_res.parameters.tensors + for bst in update: + global_model = aggregate(global_model, bst) + + self.global_model = global_model + + return ( + Parameters(tensor_type="", tensors=[cast(bytes, global_model)]), + {}, + ) + + def aggregate_evaluate( + self, + server_round: int, + results: List[Tuple[ClientProxy, EvaluateRes]], + failures: List[Union[Tuple[ClientProxy, EvaluateRes], BaseException]], + ) -> Tuple[Optional[float], Dict[str, Scalar]]: + """Aggregate evaluation metrics using average.""" + if not results: + return None, {} + # Do not aggregate if there are failures and failures are not accepted + if not self.accept_failures and failures: + return None, {} + + # Aggregate custom metrics if 
aggregation fn was provided + metrics_aggregated = {} + if self.evaluate_metrics_aggregation_fn: + eval_metrics = [(res.num_examples, res.metrics) for _, res in results] + metrics_aggregated = self.evaluate_metrics_aggregation_fn(eval_metrics) + elif server_round == 1: # Only log this warning once + log(WARNING, "No evaluate_metrics_aggregation_fn provided") + + return 0, metrics_aggregated + + def evaluate( + self, server_round: int, parameters: Parameters + ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + """Evaluate model parameters using an evaluation function.""" + if self.evaluate_function is None: + # No evaluation function provided + return None + eval_res = self.evaluate_function(server_round, parameters, {}) + if eval_res is None: + return None + loss, metrics = eval_res + return loss, metrics + +In :code:`aggregate_fit`, we sequentially aggregate the clients' XGBoost trees by calling :code:`aggregate()` function: + +.. code-block:: python + + def aggregate( + bst_prev_org: Optional[bytes], + bst_curr_org: bytes, + ) -> bytes: + """Conduct bagging aggregation for given trees.""" + if not bst_prev_org: + return bst_curr_org + + # Get the tree numbers + tree_num_prev, _ = _get_tree_nums(bst_prev_org) + _, paral_tree_num_curr = _get_tree_nums(bst_curr_org) + + bst_prev = json.loads(bytearray(bst_prev_org)) + bst_curr = json.loads(bytearray(bst_curr_org)) + + bst_prev["learner"]["gradient_booster"]["model"]["gbtree_model_param"][ + "num_trees" + ] = str(tree_num_prev + paral_tree_num_curr) + iteration_indptr = bst_prev["learner"]["gradient_booster"]["model"][ + "iteration_indptr" + ] + bst_prev["learner"]["gradient_booster"]["model"]["iteration_indptr"].append( + iteration_indptr[-1] + paral_tree_num_curr + ) + + # Aggregate new trees + trees_curr = bst_curr["learner"]["gradient_booster"]["model"]["trees"] + for tree_count in range(paral_tree_num_curr): + trees_curr[tree_count]["id"] = tree_num_prev + tree_count + 
bst_prev["learner"]["gradient_booster"]["model"]["trees"].append( + trees_curr[tree_count] + ) + bst_prev["learner"]["gradient_booster"]["model"]["tree_info"].append(0) + + bst_prev_bytes = bytes(json.dumps(bst_prev), "utf-8") + + return bst_prev_bytes + + + def _get_tree_nums(xgb_model_org: bytes) -> Tuple[int, int]: + xgb_model = json.loads(bytearray(xgb_model_org)) + # Get the number of trees + tree_num = int( + xgb_model["learner"]["gradient_booster"]["model"]["gbtree_model_param"][ + "num_trees" + ] + ) + # Get the number of parallel trees + paral_tree_num = int( + xgb_model["learner"]["gradient_booster"]["model"]["gbtree_model_param"][ + "num_parallel_tree" + ] + ) + return tree_num, paral_tree_num + +In this function, we first fetch the number of trees and the number of parallel trees for the current and previous model +by calling :code:`_get_tree_nums`. +Then, the fetched information will be aggregated. +After that, the trees (containing model weights) are aggregated to generate a new tree model. + +After traversal of all clients' models, a new global model is generated, +followed by the serialisation, and sending back to each client. + + +Launch Federated XGBoost! +--------------------------- + +With both client and server ready, we can now run everything and see federated +learning in action. FL systems usually have a server and multiple clients. We +therefore have to start the server first: + +.. code-block:: shell + + $ python3 server.py + +Once the server is running we can start the clients in different terminals. +Open a new terminal and start the first client: + +.. code-block:: shell + + $ python3 client.py --node-id=0 + +Open another terminal and start the second client: + +.. code-block:: shell + + $ python3 client.py --node-id=1 + +Each client will have its own dataset. +You should now see how the training does in the very first terminal (the one that started the server): + +.. 
code-block:: shell + + INFO flwr 2023-11-19 18:36:33,599 | app.py:163 | Starting Flower server, config: ServerConfig(num_rounds=5, round_timeout=None) + INFO flwr 2023-11-19 18:36:33,629 | app.py:176 | Flower ECE: gRPC server running (5 rounds), SSL is disabled + INFO flwr 2023-11-19 18:36:33,629 | server.py:89 | Initializing global parameters + INFO flwr 2023-11-19 18:36:33,629 | server.py:276 | Requesting initial parameters from one random client + INFO flwr 2023-11-19 18:40:03,997 | server.py:280 | Received initial parameters from one random client + INFO flwr 2023-11-19 18:40:03,999 | server.py:91 | Evaluating initial parameters + INFO flwr 2023-11-19 18:40:04,000 | server.py:104 | FL starting + DEBUG flwr 2023-11-19 18:40:04,098 | server.py:222 | fit_round 1: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-19 18:40:09,097 | server.py:236 | fit_round 1 received 2 results and 0 failures + DEBUG flwr 2023-11-19 18:40:09,108 | server.py:173 | evaluate_round 1: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-19 18:40:09,236 | server.py:187 | evaluate_round 1 received 2 results and 0 failures + DEBUG flwr 2023-11-19 18:40:09,237 | server.py:222 | fit_round 2: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-19 18:40:09,819 | server.py:236 | fit_round 2 received 2 results and 0 failures + DEBUG flwr 2023-11-19 18:40:09,855 | server.py:173 | evaluate_round 2: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-19 18:40:10,007 | server.py:187 | evaluate_round 2 received 2 results and 0 failures + DEBUG flwr 2023-11-19 18:40:10,007 | server.py:222 | fit_round 3: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-19 18:40:10,623 | server.py:236 | fit_round 3 received 2 results and 0 failures + DEBUG flwr 2023-11-19 18:40:10,674 | server.py:173 | evaluate_round 3: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-19 18:40:10,847 | server.py:187 | evaluate_round 3 received 2 results and 0 failures + DEBUG flwr 
2023-11-19 18:40:10,847 | server.py:222 | fit_round 4: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-19 18:40:11,515 | server.py:236 | fit_round 4 received 2 results and 0 failures + DEBUG flwr 2023-11-19 18:40:11,581 | server.py:173 | evaluate_round 4: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-19 18:40:11,775 | server.py:187 | evaluate_round 4 received 2 results and 0 failures + DEBUG flwr 2023-11-19 18:40:11,775 | server.py:222 | fit_round 5: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-19 18:40:12,568 | server.py:236 | fit_round 5 received 2 results and 0 failures + DEBUG flwr 2023-11-19 18:40:12,648 | server.py:173 | evaluate_round 5: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-19 18:40:12,862 | server.py:187 | evaluate_round 5 received 2 results and 0 failures + INFO flwr 2023-11-19 18:40:12,862 | server.py:153 | FL finished in 8.86196927200001 + INFO flwr 2023-11-19 18:40:12,864 | app.py:226 | app_fit: losses_distributed [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0)] + INFO flwr 2023-11-19 18:40:12,864 | app.py:227 | app_fit: metrics_distributed_fit {} + INFO flwr 2023-11-19 18:40:12,864 | app.py:228 | app_fit: metrics_distributed {'AUC': [(1, 0.76315), (2, 0.7734), (3, 0.7783), (4, 0.7824), (5, 0.78595)]} + INFO flwr 2023-11-19 18:40:12,864 | app.py:229 | app_fit: losses_centralized [] + INFO flwr 2023-11-19 18:40:12,864 | app.py:230 | app_fit: metrics_centralized {} + +Congratulations! +You've successfully built and run your first federated XGBoost system. +The AUC values can be checked in :code:`metrics_distributed`. +One can see that the average AUC increases over FL rounds. + +The full `source code `_ for this example can be found in :code:`examples/xgboost-quickstart`. + + +Comprehensive Federated XGBoost +--------------------------- + +Now that you have known how federated XGBoost work with Flower, it's time to run some more comprehensive experiments by customising the experimental settings. 
+In the xgboost-comprehensive example (`full code `_), +we provide more options to define various experimental setups, including data partitioning and centralised/distributed evaluation. +Let's take a look! + +Customised data partitioning +~~~~~~~~ + +In :code:`dataset.py`, we have a function :code:`instantiate_partitioner` to instantiate the data partitioner +based on the given :code:`num_partitions` and :code:`partitioner_type`. +Currently, we provide four supported partitioner type to simulate the uniformity/non-uniformity in data quantity (uniform, linear, square, exponential). + +.. code-block:: python + + from flwr_datasets.partitioner import ( + IidPartitioner, + LinearPartitioner, + SquarePartitioner, + ExponentialPartitioner, + ) + + CORRELATION_TO_PARTITIONER = { + "uniform": IidPartitioner, + "linear": LinearPartitioner, + "square": SquarePartitioner, + "exponential": ExponentialPartitioner, + } + + + def instantiate_partitioner(partitioner_type: str, num_partitions: int): + """Initialise partitioner based on selected partitioner type and number of + partitions.""" + partitioner = CORRELATION_TO_PARTITIONER[partitioner_type]( + num_partitions=num_partitions + ) + return partitioner + + +Customised centralised/distributed evaluation +~~~~~~~~ + +To facilitate centralised evaluation, we define a function in :code:`server.py`: + +.. 
code-block:: python + + def get_evaluate_fn(test_data): + """Return a function for centralised evaluation.""" + + def evaluate_fn( + server_round: int, parameters: Parameters, config: Dict[str, Scalar] + ): + # If at the first round, skip the evaluation + if server_round == 0: + return 0, {} + else: + bst = xgb.Booster(params=params) + for para in parameters.tensors: + para_b = bytearray(para) + + # Load global model + bst.load_model(para_b) + # Run evaluation + eval_results = bst.eval_set( + evals=[(test_data, "valid")], + iteration=bst.num_boosted_rounds() - 1, + ) + auc = round(float(eval_results.split("\t")[1].split(":")[1]), 4) + log(INFO, f"AUC = {auc} at round {server_round}") + + return 0, {"AUC": auc} + + return evaluate_fn + +This function returns a evaluation function which instantiates a :code:`Booster` object and loads the global model weights to it. +The evaluation is conducted by calling :code:`eval_set()` method, and the tested AUC value is reported. + +As for distributed evaluation on the clients, it's same as the quick-start example by +overriding the :code:`evaluate()` method insides the :code:`XgbClient` class in :code:`client.py`. + +Arguments parser +~~~~~~~~ + +In :code:`utils.py`, we define the arguments parsers for clients and server, allowing users to specify different experimental settings. +Let's first see the sever side: + +.. code-block:: python + + import argparse + + + def server_args_parser(): + """Parse arguments to define experimental settings on server side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--pool-size", default=2, type=int, help="Number of total clients." + ) + parser.add_argument( + "--num-rounds", default=5, type=int, help="Number of FL rounds." 
+ ) + parser.add_argument( + "--num-clients-per-round", + default=2, + type=int, + help="Number of clients participate in training each round.", + ) + parser.add_argument( + "--num-evaluate-clients", + default=2, + type=int, + help="Number of clients selected for evaluation.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + + args = parser.parse_args() + return args + +This allows user to specify the number of total clients / FL rounds / participating clients / clients for evaluation, +and evaluation fashion. Note that with :code:`--centralised-eval`, the sever will do centralised evaluation +and all functionalities for client evaluation will be disabled. + +Then, the argument parser on client side: + +.. code-block:: python + + def client_args_parser(): + """Parse arguments to define experimental settings on client side.""" + parser = argparse.ArgumentParser() + + parser.add_argument( + "--num-partitions", default=10, type=int, help="Number of partitions." + ) + parser.add_argument( + "--partitioner-type", + default="uniform", + type=str, + choices=["uniform", "linear", "square", "exponential"], + help="Partitioner types.", + ) + parser.add_argument( + "--node-id", + default=0, + type=int, + help="Node ID used for the current client.", + ) + parser.add_argument( + "--seed", default=42, type=int, help="Seed used for train/test splitting." + ) + parser.add_argument( + "--test-fraction", + default=0.2, + type=float, + help="Test fraction for train/test splitting.", + ) + parser.add_argument( + "--centralised-eval", + action="store_true", + help="Conduct centralised evaluation (True), or client evaluation on hold-out data (False).", + ) + + args = parser.parse_args() + return args + +This defines various options for client data partitioning. 
+Besides, clients also have a option to conduct evaluation on centralised test set by setting :code:`--centralised-eval`. + +Example commands +~~~~~~~~ + +To run a centralised evaluated experiment on 5 clients with exponential distribution for 50 rounds, +we first start the server as below: + +.. code-block:: shell + + $ python3 server.py --pool-size=5 --num-rounds=50 --num-clients-per-round=5 --centralised-eval + +Then, on each client terminal, we start the clients: + +.. code-block:: shell + + $ python3 clients.py --num-partitions=5 --partitioner-type=exponential --node-id=NODE_ID + +The full `source code `_ for this comprehensive example can be found in :code:`examples/xgboost-comprehensive`. From 78cb4171442290171d0633c08f4545274f3d19ce Mon Sep 17 00:00:00 2001 From: Yan Gao Date: Mon, 20 Nov 2023 11:59:38 +0000 Subject: [PATCH 05/11] Update XGBoost examples (#2619) Co-authored-by: yan-gao-GY --- doc/source/tutorial-quickstart-xgboost.rst | 69 +++++++++++----------- examples/xgboost-comprehensive/README.md | 3 +- examples/xgboost-comprehensive/client.py | 2 + examples/xgboost-quickstart/README.md | 1 + examples/xgboost-quickstart/client.py | 5 +- 5 files changed, 44 insertions(+), 36 deletions(-) diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst index 3de4e0ef81cb..8d6f78f3088a 100644 --- a/doc/source/tutorial-quickstart-xgboost.rst +++ b/doc/source/tutorial-quickstart-xgboost.rst @@ -88,7 +88,8 @@ Prior to local training, we require loading the HIGGS dataset from Flower Datase .. code-block:: python # Load (HIGGS) dataset and conduct partitioning - partitioner = IidPartitioner(num_partitions=2) + # We use a small subset (num_partitions=30) of the dataset for demonstration to speed up the data loading process. 
+ partitioner = IidPartitioner(num_partitions=30) fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) # Load the partition for this `node_id` @@ -544,39 +545,39 @@ You should now see how the training does in the very first terminal (the one tha .. code-block:: shell - INFO flwr 2023-11-19 18:36:33,599 | app.py:163 | Starting Flower server, config: ServerConfig(num_rounds=5, round_timeout=None) - INFO flwr 2023-11-19 18:36:33,629 | app.py:176 | Flower ECE: gRPC server running (5 rounds), SSL is disabled - INFO flwr 2023-11-19 18:36:33,629 | server.py:89 | Initializing global parameters - INFO flwr 2023-11-19 18:36:33,629 | server.py:276 | Requesting initial parameters from one random client - INFO flwr 2023-11-19 18:40:03,997 | server.py:280 | Received initial parameters from one random client - INFO flwr 2023-11-19 18:40:03,999 | server.py:91 | Evaluating initial parameters - INFO flwr 2023-11-19 18:40:04,000 | server.py:104 | FL starting - DEBUG flwr 2023-11-19 18:40:04,098 | server.py:222 | fit_round 1: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-19 18:40:09,097 | server.py:236 | fit_round 1 received 2 results and 0 failures - DEBUG flwr 2023-11-19 18:40:09,108 | server.py:173 | evaluate_round 1: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-19 18:40:09,236 | server.py:187 | evaluate_round 1 received 2 results and 0 failures - DEBUG flwr 2023-11-19 18:40:09,237 | server.py:222 | fit_round 2: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-19 18:40:09,819 | server.py:236 | fit_round 2 received 2 results and 0 failures - DEBUG flwr 2023-11-19 18:40:09,855 | server.py:173 | evaluate_round 2: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-19 18:40:10,007 | server.py:187 | evaluate_round 2 received 2 results and 0 failures - DEBUG flwr 2023-11-19 18:40:10,007 | server.py:222 | fit_round 3: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-19 18:40:10,623 | server.py:236 | 
fit_round 3 received 2 results and 0 failures - DEBUG flwr 2023-11-19 18:40:10,674 | server.py:173 | evaluate_round 3: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-19 18:40:10,847 | server.py:187 | evaluate_round 3 received 2 results and 0 failures - DEBUG flwr 2023-11-19 18:40:10,847 | server.py:222 | fit_round 4: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-19 18:40:11,515 | server.py:236 | fit_round 4 received 2 results and 0 failures - DEBUG flwr 2023-11-19 18:40:11,581 | server.py:173 | evaluate_round 4: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-19 18:40:11,775 | server.py:187 | evaluate_round 4 received 2 results and 0 failures - DEBUG flwr 2023-11-19 18:40:11,775 | server.py:222 | fit_round 5: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-19 18:40:12,568 | server.py:236 | fit_round 5 received 2 results and 0 failures - DEBUG flwr 2023-11-19 18:40:12,648 | server.py:173 | evaluate_round 5: strategy sampled 2 clients (out of 2) - DEBUG flwr 2023-11-19 18:40:12,862 | server.py:187 | evaluate_round 5 received 2 results and 0 failures - INFO flwr 2023-11-19 18:40:12,862 | server.py:153 | FL finished in 8.86196927200001 - INFO flwr 2023-11-19 18:40:12,864 | app.py:226 | app_fit: losses_distributed [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0)] - INFO flwr 2023-11-19 18:40:12,864 | app.py:227 | app_fit: metrics_distributed_fit {} - INFO flwr 2023-11-19 18:40:12,864 | app.py:228 | app_fit: metrics_distributed {'AUC': [(1, 0.76315), (2, 0.7734), (3, 0.7783), (4, 0.7824), (5, 0.78595)]} - INFO flwr 2023-11-19 18:40:12,864 | app.py:229 | app_fit: losses_centralized [] - INFO flwr 2023-11-19 18:40:12,864 | app.py:230 | app_fit: metrics_centralized {} + INFO flwr 2023-11-20 11:21:56,454 | app.py:163 | Starting Flower server, config: ServerConfig(num_rounds=5, round_timeout=None) + INFO flwr 2023-11-20 11:21:56,473 | app.py:176 | Flower ECE: gRPC server running (5 rounds), SSL is disabled + INFO flwr 2023-11-20 
11:21:56,473 | server.py:89 | Initializing global parameters + INFO flwr 2023-11-20 11:21:56,473 | server.py:276 | Requesting initial parameters from one random client + INFO flwr 2023-11-20 11:22:38,302 | server.py:280 | Received initial parameters from one random client + INFO flwr 2023-11-20 11:22:38,302 | server.py:91 | Evaluating initial parameters + INFO flwr 2023-11-20 11:22:38,302 | server.py:104 | FL starting + DEBUG flwr 2023-11-20 11:22:38,302 | server.py:222 | fit_round 1: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-20 11:22:38,636 | server.py:236 | fit_round 1 received 2 results and 0 failures + DEBUG flwr 2023-11-20 11:22:38,643 | server.py:173 | evaluate_round 1: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-20 11:22:38,653 | server.py:187 | evaluate_round 1 received 2 results and 0 failures + DEBUG flwr 2023-11-20 11:22:38,653 | server.py:222 | fit_round 2: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-20 11:22:38,721 | server.py:236 | fit_round 2 received 2 results and 0 failures + DEBUG flwr 2023-11-20 11:22:38,745 | server.py:173 | evaluate_round 2: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-20 11:22:38,756 | server.py:187 | evaluate_round 2 received 2 results and 0 failures + DEBUG flwr 2023-11-20 11:22:38,756 | server.py:222 | fit_round 3: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-20 11:22:38,831 | server.py:236 | fit_round 3 received 2 results and 0 failures + DEBUG flwr 2023-11-20 11:22:38,868 | server.py:173 | evaluate_round 3: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-20 11:22:38,881 | server.py:187 | evaluate_round 3 received 2 results and 0 failures + DEBUG flwr 2023-11-20 11:22:38,881 | server.py:222 | fit_round 4: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-20 11:22:38,960 | server.py:236 | fit_round 4 received 2 results and 0 failures + DEBUG flwr 2023-11-20 11:22:39,012 | server.py:173 | evaluate_round 4: strategy sampled 2 
clients (out of 2) + DEBUG flwr 2023-11-20 11:22:39,026 | server.py:187 | evaluate_round 4 received 2 results and 0 failures + DEBUG flwr 2023-11-20 11:22:39,026 | server.py:222 | fit_round 5: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-20 11:22:39,111 | server.py:236 | fit_round 5 received 2 results and 0 failures + DEBUG flwr 2023-11-20 11:22:39,177 | server.py:173 | evaluate_round 5: strategy sampled 2 clients (out of 2) + DEBUG flwr 2023-11-20 11:22:39,193 | server.py:187 | evaluate_round 5 received 2 results and 0 failures + INFO flwr 2023-11-20 11:22:39,193 | server.py:153 | FL finished in 0.8905023969999988 + INFO flwr 2023-11-20 11:22:39,193 | app.py:226 | app_fit: losses_distributed [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0)] + INFO flwr 2023-11-20 11:22:39,193 | app.py:227 | app_fit: metrics_distributed_fit {} + INFO flwr 2023-11-20 11:22:39,193 | app.py:228 | app_fit: metrics_distributed {'AUC': [(1, 0.7572), (2, 0.7705), (3, 0.77595), (4, 0.78), (5, 0.78385)]} + INFO flwr 2023-11-20 11:22:39,193 | app.py:229 | app_fit: losses_centralized [] + INFO flwr 2023-11-20 11:22:39,193 | app.py:230 | app_fit: metrics_centralized {} Congratulations! You've successfully built and run your first federated XGBoost system. diff --git a/examples/xgboost-comprehensive/README.md b/examples/xgboost-comprehensive/README.md index 783438188dab..da002a10d301 100644 --- a/examples/xgboost-comprehensive/README.md +++ b/examples/xgboost-comprehensive/README.md @@ -1,7 +1,8 @@ # Flower Example using XGBoost (Comprehensive) This example demonstrates a comprehensive federated learning setup using Flower with XGBoost. -It differs from the quickstart example in the following ways: +We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset to perform a binary classification task. 
+It differs from the [xgboost-quickstart](https://github.com/adap/flower/tree/main/examples/xgboost-quickstart) example in the following ways: - Arguments parsers of server and clients for hyperparameters selection. - Customised FL settings. diff --git a/examples/xgboost-comprehensive/client.py b/examples/xgboost-comprehensive/client.py index 889f5f47e0ae..bc9735a2f657 100644 --- a/examples/xgboost-comprehensive/client.py +++ b/examples/xgboost-comprehensive/client.py @@ -47,6 +47,7 @@ ) # Load the partition for this `node_id` +log(INFO, "Loading partition...") node_id = args.node_id partition = fds.load_partition(idx=node_id, split="train") partition.set_format("numpy") @@ -67,6 +68,7 @@ ) # Reformat data to DMatrix for xgboost +log(INFO, "Reformatting data...") train_dmatrix = transform_dataset_to_dmatrix(train_data) valid_dmatrix = transform_dataset_to_dmatrix(valid_data) diff --git a/examples/xgboost-quickstart/README.md b/examples/xgboost-quickstart/README.md index 309f8f55b847..5174c236c668 100644 --- a/examples/xgboost-quickstart/README.md +++ b/examples/xgboost-quickstart/README.md @@ -1,6 +1,7 @@ # Flower Example using XGBoost This example demonstrates how to perform EXtreme Gradient Boosting (XGBoost) within Flower using `xgboost` package. +We use [HIGGS](https://archive.ics.uci.edu/dataset/280/higgs) dataset for this example to perform a binary classification task. Tree-based with bagging method is used for aggregation on the server. This project provides a minimal code example to enable you to get stated quickly. For a more comprehensive code example, take a look at [xgboost-comprehensive](https://github.com/adap/flower/tree/main/examples/xgboost-comprehensive). 
diff --git a/examples/xgboost-quickstart/client.py b/examples/xgboost-quickstart/client.py index ed85e98a3ab2..e88580197128 100644 --- a/examples/xgboost-quickstart/client.py +++ b/examples/xgboost-quickstart/client.py @@ -57,10 +57,12 @@ def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core. # Load (HIGGS) dataset and conduct partitioning -partitioner = IidPartitioner(num_partitions=2) +# We use a small subset (num_partitions=30) of the dataset for demonstration to speed up the data loading process. +partitioner = IidPartitioner(num_partitions=30) fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) # Load the partition for this `node_id` +log(INFO, "Loading partition...") partition = fds.load_partition(idx=args.node_id, split="train") partition.set_format("numpy") @@ -70,6 +72,7 @@ def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core. ) # Reformat data to DMatrix for xgboost +log(INFO, "Reformatting data...") train_dmatrix = transform_dataset_to_dmatrix(train_data) valid_dmatrix = transform_dataset_to_dmatrix(valid_data) From 10c05e26c5495e4c91ac95592ab0856f715e7199 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Nov 2023 13:11:34 +0000 Subject: [PATCH 06/11] Bump pyyaml from 5.3 to 5.4 in /examples/quickstart-mlcube/dev (#2492) Bumps [pyyaml](https://github.com/yaml/pyyaml) from 5.3 to 5.4. - [Changelog](https://github.com/yaml/pyyaml/blob/main/CHANGES) - [Commits](https://github.com/yaml/pyyaml/compare/5.3...5.4) --- updated-dependencies: - dependency-name: pyyaml dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Taner Topal Co-authored-by: Charles Beauville Co-authored-by: Daniel J. 
Beutel --- examples/quickstart-mlcube/dev/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/quickstart-mlcube/dev/requirements.txt b/examples/quickstart-mlcube/dev/requirements.txt index c39a5fa73f81..5cfa618878d2 100644 --- a/examples/quickstart-mlcube/dev/requirements.txt +++ b/examples/quickstart-mlcube/dev/requirements.txt @@ -1,4 +1,4 @@ -PyYAML==5.3 +PyYAML==5.4 tensorflow==2.14.0 tensorflow-estimator==2.14.0 requests[security] From 10bfc33df73ec461de4ed0f4a99d09fb645a1555 Mon Sep 17 00:00:00 2001 From: Gabriel Mota <63614154+gfmota@users.noreply.github.com> Date: Mon, 20 Nov 2023 19:12:06 -0300 Subject: [PATCH 07/11] Fix FedXgbNnAvg strategy doc to reference the right paper (#2542) Signed-off-by: gfmota --- src/py/flwr/server/strategy/fedxgb_nn_avg.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/py/flwr/server/strategy/fedxgb_nn_avg.py b/src/py/flwr/server/strategy/fedxgb_nn_avg.py index 020e0ef71267..f300633d0d9f 100644 --- a/src/py/flwr/server/strategy/fedxgb_nn_avg.py +++ b/src/py/flwr/server/strategy/fedxgb_nn_avg.py @@ -17,7 +17,7 @@ Strategy in the horizontal setting based on building Neural Network and averaging on prediction outcomes. -Paper: Coming +Paper: arxiv.org/abs/2304.07537 """ @@ -35,6 +35,13 @@ class FedXgbNnAvg(FedAvg): """Configurable FedXgbNnAvg strategy implementation.""" + def __init__(self, *args: Any, **kwargs: Any) -> None: + """Federated XGBoost [Ma et al., 2023] strategy. + + Implementation based on https://arxiv.org/abs/2304.07537. + """ + super().__init__(*args, **kwargs) + def __repr__(self) -> str: """Compute a string representation of the strategy.""" rep = f"FedXgbNnAvg(accept_failures={self.accept_failures})" From 8bcc4bdaa80f065ce5a95f19326c9a7bc0ef74f7 Mon Sep 17 00:00:00 2001 From: "Daniel J. 
Beutel" Date: Tue, 21 Nov 2023 12:16:47 +0100 Subject: [PATCH 08/11] Introduce experimental feature warning (#2622) --- .../flwr/client/grpc_rere_client/connection.py | 14 ++++---------- src/py/flwr/common/logger.py | 16 +++++++++++++++- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/py/flwr/client/grpc_rere_client/connection.py b/src/py/flwr/client/grpc_rere_client/connection.py index b69228826e13..424e413dc484 100644 --- a/src/py/flwr/client/grpc_rere_client/connection.py +++ b/src/py/flwr/client/grpc_rere_client/connection.py @@ -16,7 +16,7 @@ from contextlib import contextmanager -from logging import DEBUG, ERROR, WARN +from logging import DEBUG, ERROR from pathlib import Path from typing import Callable, Dict, Iterator, Optional, Tuple, Union, cast @@ -28,7 +28,7 @@ ) from flwr.common import GRPC_MAX_MESSAGE_LENGTH from flwr.common.grpc import create_channel -from flwr.common.logger import log +from flwr.common.logger import log, warn_experimental_feature from flwr.proto.fleet_pb2 import ( CreateNodeRequest, DeleteNodeRequest, @@ -88,6 +88,8 @@ def grpc_request_response( create_node : Optional[Callable] delete_node : Optional[Callable] """ + warn_experimental_feature("`grpc-rere`") + if isinstance(root_certificates, str): root_certificates = Path(root_certificates).read_bytes() @@ -99,14 +101,6 @@ def grpc_request_response( channel.subscribe(on_channel_state_change) stub = FleetStub(channel) - log( - WARN, - """ - EXPERIMENTAL: `grpc-rere` is an experimental transport layer, it might change - considerably in future versions of Flower - """, - ) - # Necessary state to link TaskRes to TaskIns state: Dict[str, Optional[TaskIns]] = {KEY_TASK_INS: None} diff --git a/src/py/flwr/common/logger.py b/src/py/flwr/common/logger.py index e543d6565878..29d1562a86d3 100644 --- a/src/py/flwr/common/logger.py +++ b/src/py/flwr/common/logger.py @@ -16,7 +16,7 @@ import logging -from logging import LogRecord +from logging import WARN, LogRecord from 
logging.handlers import HTTPHandler from typing import Any, Dict, Optional, Tuple @@ -97,3 +97,17 @@ def configure( logger = logging.getLogger(LOGGER_NAME) # pylint: disable=invalid-name log = logger.log # pylint: disable=invalid-name + + +def warn_experimental_feature(name: str) -> None: + """Warn the user when they use an experimental feature.""" + log( + WARN, + """ + EXPERIMENTAL FEATURE: %s + + This is an experimental feature. It could change significantly or be removed + entirely in future versions of Flower. + """, + name, + ) From 1ad038b5de27e250c5773b212395e27c0e9d7833 Mon Sep 17 00:00:00 2001 From: Daniel Nata Nugraha Date: Tue, 21 Nov 2023 15:56:31 +0100 Subject: [PATCH 09/11] Fix wrong status int value in Android SDK (#2623) --- .../flwr/src/main/java/dev/flower/android/Typing.kt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/kotlin/flwr/src/main/java/dev/flower/android/Typing.kt b/src/kotlin/flwr/src/main/java/dev/flower/android/Typing.kt index a88af0e28974..6db7ecd36987 100644 --- a/src/kotlin/flwr/src/main/java/dev/flower/android/Typing.kt +++ b/src/kotlin/flwr/src/main/java/dev/flower/android/Typing.kt @@ -23,11 +23,11 @@ typealias Properties = Map * The `Code` class defines client status codes used in the application. 
*/ enum class Code(val value: Int) { - OK(1), - GET_PROPERTIES_NOT_IMPLEMENTED(2), - GET_PARAMETERS_NOT_IMPLEMENTED(3), - FIT_NOT_IMPLEMENTED(4), - EVALUATE_NOT_IMPLEMENTED(5); + OK(0), + GET_PROPERTIES_NOT_IMPLEMENTED(1), + GET_PARAMETERS_NOT_IMPLEMENTED(2), + FIT_NOT_IMPLEMENTED(3), + EVALUATE_NOT_IMPLEMENTED(4); companion object { fun fromInt(value: Int): Code = values().first { it.value == value } From 821d843278e60c55acdfb3574de8958c26f7a644 Mon Sep 17 00:00:00 2001 From: Yan Gao Date: Tue, 21 Nov 2023 21:48:36 +0000 Subject: [PATCH 10/11] XGBoost -- Fix node_id issue to fit FDSv0.0.2 (#2624) Co-authored-by: yan-gao-GY --- doc/source/tutorial-quickstart-xgboost.rst | 2 +- examples/xgboost-comprehensive/client.py | 6 ++++-- examples/xgboost-comprehensive/pyproject.toml | 2 +- examples/xgboost-comprehensive/requirements.txt | 2 +- examples/xgboost-quickstart/client.py | 2 +- examples/xgboost-quickstart/pyproject.toml | 2 +- examples/xgboost-quickstart/requirements.txt | 2 +- 7 files changed, 10 insertions(+), 8 deletions(-) diff --git a/doc/source/tutorial-quickstart-xgboost.rst b/doc/source/tutorial-quickstart-xgboost.rst index 8d6f78f3088a..111920d5602b 100644 --- a/doc/source/tutorial-quickstart-xgboost.rst +++ b/doc/source/tutorial-quickstart-xgboost.rst @@ -93,7 +93,7 @@ Prior to local training, we require loading the HIGGS dataset from Flower Datase fds = FederatedDataset(dataset="jxie/higgs", partitioners={"train": partitioner}) # Load the partition for this `node_id` - partition = fds.load_partition(idx=args.node_id, split="train") + partition = fds.load_partition(node_id=args.node_id, split="train") partition.set_format("numpy") In this example, we split the dataset into two partitions with uniform distribution (:code:`IidPartitioner(num_partitions=2)`). 
diff --git a/examples/xgboost-comprehensive/client.py b/examples/xgboost-comprehensive/client.py index bc9735a2f657..a37edac32648 100644 --- a/examples/xgboost-comprehensive/client.py +++ b/examples/xgboost-comprehensive/client.py @@ -43,13 +43,15 @@ partitioner_type=partitioner_type, num_partitions=num_partitions ) fds = FederatedDataset( - dataset="jxie/higgs", partitioners={"train": partitioner}, resplitter=resplit + dataset="jxie/higgs", + partitioners={"train": partitioner}, + resplitter=resplit, ) # Load the partition for this `node_id` log(INFO, "Loading partition...") node_id = args.node_id -partition = fds.load_partition(idx=node_id, split="train") +partition = fds.load_partition(node_id=node_id, split="train") partition.set_format("numpy") if args.centralised_eval: diff --git a/examples/xgboost-comprehensive/pyproject.toml b/examples/xgboost-comprehensive/pyproject.toml index 5414b5122154..bbfbb4134b8d 100644 --- a/examples/xgboost-comprehensive/pyproject.toml +++ b/examples/xgboost-comprehensive/pyproject.toml @@ -10,6 +10,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" +flwr-nightly = ">=1.0,<2.0" flwr-datasets = ">=0.0.2,<1.0.0" xgboost = ">=2.0.0,<3.0.0" diff --git a/examples/xgboost-comprehensive/requirements.txt b/examples/xgboost-comprehensive/requirements.txt index c6b9c1a67894..c37ac2b6ad6d 100644 --- a/examples/xgboost-comprehensive/requirements.txt +++ b/examples/xgboost-comprehensive/requirements.txt @@ -1,3 +1,3 @@ -flwr>=1.0, <2.0 +flwr-nightly>=1.0, <2.0 flwr-datasets>=0.0.2, <1.0.0 xgboost>=2.0.0, <3.0.0 diff --git a/examples/xgboost-quickstart/client.py b/examples/xgboost-quickstart/client.py index e88580197128..b5eab59ba14d 100644 --- a/examples/xgboost-quickstart/client.py +++ b/examples/xgboost-quickstart/client.py @@ -63,7 +63,7 @@ def transform_dataset_to_dmatrix(data: Union[Dataset, DatasetDict]) -> xgb.core. 
# Load the partition for this `node_id` log(INFO, "Loading partition...") -partition = fds.load_partition(idx=args.node_id, split="train") +partition = fds.load_partition(node_id=args.node_id, split="train") partition.set_format("numpy") # Train/test splitting diff --git a/examples/xgboost-quickstart/pyproject.toml b/examples/xgboost-quickstart/pyproject.toml index 74256846c693..d82535311e58 100644 --- a/examples/xgboost-quickstart/pyproject.toml +++ b/examples/xgboost-quickstart/pyproject.toml @@ -10,6 +10,6 @@ authors = ["The Flower Authors "] [tool.poetry.dependencies] python = ">=3.8,<3.11" -flwr = ">=1.0,<2.0" +flwr-nightly = ">=1.0,<2.0" flwr-datasets = ">=0.0.1,<1.0.0" xgboost = ">=2.0.0,<3.0.0" diff --git a/examples/xgboost-quickstart/requirements.txt b/examples/xgboost-quickstart/requirements.txt index 9596a8d6cd02..aefd74097582 100644 --- a/examples/xgboost-quickstart/requirements.txt +++ b/examples/xgboost-quickstart/requirements.txt @@ -1,3 +1,3 @@ -flwr>=1.0, <2.0 +flwr-nightly>=1.0, <2.0 flwr-datasets>=0.0.1, <1.0.0 xgboost>=2.0.0, <3.0.0 From 1f9fa755d005f1df4aa01366ca482385b9520de8 Mon Sep 17 00:00:00 2001 From: "Daniel J. 
Beutel" Date: Thu, 23 Nov 2023 09:15:15 +0100 Subject: [PATCH 11/11] Decouple client callable (#2393) --- examples/mt-pytorch-callable/README.md | 49 +++++++ examples/mt-pytorch-callable/client.py | 123 ++++++++++++++++ examples/mt-pytorch-callable/driver.py | 25 ++++ examples/mt-pytorch-callable/pyproject.toml | 16 ++ examples/mt-pytorch-callable/requirements.txt | 4 + examples/mt-pytorch-callable/run.sh | 20 +++ examples/mt-pytorch-callable/server.py | 25 ++++ src/py/flwr/__init__.py | 3 +- src/py/flwr/client/app.py | 84 ++++++++--- src/py/flwr/client/flower.py | 138 ++++++++++++++++++ src/py/flwr/flower/__init__.py | 26 ++++ 11 files changed, 495 insertions(+), 18 deletions(-) create mode 100644 examples/mt-pytorch-callable/README.md create mode 100644 examples/mt-pytorch-callable/client.py create mode 100644 examples/mt-pytorch-callable/driver.py create mode 100644 examples/mt-pytorch-callable/pyproject.toml create mode 100644 examples/mt-pytorch-callable/requirements.txt create mode 100755 examples/mt-pytorch-callable/run.sh create mode 100644 examples/mt-pytorch-callable/server.py create mode 100644 src/py/flwr/client/flower.py create mode 100644 src/py/flwr/flower/__init__.py diff --git a/examples/mt-pytorch-callable/README.md b/examples/mt-pytorch-callable/README.md new file mode 100644 index 000000000000..65ef000c26f2 --- /dev/null +++ b/examples/mt-pytorch-callable/README.md @@ -0,0 +1,49 @@ +# Deploy ๐Ÿงช + +๐Ÿงช = this page covers experimental features that might change in future versions of Flower + +This how-to guide describes the deployment of a long-running Flower server. + +## Preconditions + +Let's assume the following project structure: + +```bash +$ tree . +. 
+โ””โ”€โ”€ client.py +โ”œโ”€โ”€ driver.py +โ”œโ”€โ”€ requirements.txt +``` + +## Install dependencies + +```bash +pip install -r requirements.txt +``` + +## Start the long-running Flower server + +```bash +flower-server --insecure +``` + +## Start the long-running Flower client + +In a new terminal window, start the first long-running Flower client: + +```bash +flower-client --callable client:flower +``` + +In yet another new terminal window, start the second long-running Flower client: + +```bash +flower-client --callable client:flower +``` + +## Start the Driver script + +```bash +python driver.py +``` diff --git a/examples/mt-pytorch-callable/client.py b/examples/mt-pytorch-callable/client.py new file mode 100644 index 000000000000..6f9747784ae0 --- /dev/null +++ b/examples/mt-pytorch-callable/client.py @@ -0,0 +1,123 @@ +import warnings +from collections import OrderedDict + +import flwr as fl +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.utils.data import DataLoader +from torchvision.datasets import CIFAR10 +from torchvision.transforms import Compose, Normalize, ToTensor +from tqdm import tqdm + + +# ############################################################################# +# 1. 
Regular PyTorch pipeline: nn.Module, train, test, and DataLoader +# ############################################################################# + +warnings.filterwarnings("ignore", category=UserWarning) +DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + +class Net(nn.Module): + """Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')""" + + def __init__(self) -> None: + super(Net, self).__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = x.view(-1, 16 * 5 * 5) + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + return self.fc3(x) + + +def train(net, trainloader, epochs): + """Train the model on the training set.""" + criterion = torch.nn.CrossEntropyLoss() + optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + for _ in range(epochs): + for images, labels in tqdm(trainloader): + optimizer.zero_grad() + criterion(net(images.to(DEVICE)), labels.to(DEVICE)).backward() + optimizer.step() + + +def test(net, testloader): + """Validate the model on the test set.""" + criterion = torch.nn.CrossEntropyLoss() + correct, loss = 0, 0.0 + with torch.no_grad(): + for images, labels in tqdm(testloader): + outputs = net(images.to(DEVICE)) + labels = labels.to(DEVICE) + loss += criterion(outputs, labels).item() + correct += (torch.max(outputs.data, 1)[1] == labels).sum().item() + accuracy = correct / len(testloader.dataset) + return loss, accuracy + + +def load_data(): + """Load CIFAR-10 (training and test set).""" + trf = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + trainset = CIFAR10("./data", train=True, download=True, transform=trf) + testset = CIFAR10("./data", train=False, download=True, 
transform=trf) + return DataLoader(trainset, batch_size=32, shuffle=True), DataLoader(testset) + + +# ############################################################################# +# 2. Federation of the pipeline with Flower +# ############################################################################# + +# Load model and data (simple CNN, CIFAR-10) +net = Net().to(DEVICE) +trainloader, testloader = load_data() + + +# Define Flower client +class FlowerClient(fl.client.NumPyClient): + def get_parameters(self, config): + return [val.cpu().numpy() for _, val in net.state_dict().items()] + + def set_parameters(self, parameters): + params_dict = zip(net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + + def fit(self, parameters, config): + self.set_parameters(parameters) + train(net, trainloader, epochs=1) + return self.get_parameters(config={}), len(trainloader.dataset), {} + + def evaluate(self, parameters, config): + self.set_parameters(parameters) + loss, accuracy = test(net, testloader) + return loss, len(testloader.dataset), {"accuracy": accuracy} + + +def client_fn(cid: str): + """.""" + return FlowerClient().to_client() + + +# To run this: `flower-client --callable client:flower` +flower = fl.flower.Flower( + client_fn=client_fn, +) + + +if __name__ == "__main__": + # Start Flower client + fl.client.start_client( + server_address="0.0.0.0:9092", + client=FlowerClient().to_client(), + transport="grpc-rere", + ) diff --git a/examples/mt-pytorch-callable/driver.py b/examples/mt-pytorch-callable/driver.py new file mode 100644 index 000000000000..1248672b6813 --- /dev/null +++ b/examples/mt-pytorch-callable/driver.py @@ -0,0 +1,25 @@ +from typing import List, Tuple + +import flwr as fl +from flwr.common import Metrics + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each 
client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +# Define strategy +strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) + +# Start Flower driver +fl.driver.start_driver( + server_address="0.0.0.0:9091", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git a/examples/mt-pytorch-callable/pyproject.toml b/examples/mt-pytorch-callable/pyproject.toml new file mode 100644 index 000000000000..0d1a91836006 --- /dev/null +++ b/examples/mt-pytorch-callable/pyproject.toml @@ -0,0 +1,16 @@ +[build-system] +requires = ["poetry-core>=1.4.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "quickstart-pytorch" +version = "0.1.0" +description = "PyTorch Federated Learning Quickstart with Flower" +authors = ["The Flower Authors "] + +[tool.poetry.dependencies] +python = ">=3.8,<3.11" +flwr = { path = "../../", develop = true, extras = ["simulation", "rest"] } +torch = "1.13.1" +torchvision = "0.14.1" +tqdm = "4.65.0" diff --git a/examples/mt-pytorch-callable/requirements.txt b/examples/mt-pytorch-callable/requirements.txt new file mode 100644 index 000000000000..797ca6db6244 --- /dev/null +++ b/examples/mt-pytorch-callable/requirements.txt @@ -0,0 +1,4 @@ +flwr>=1.0, <2.0 +torch==1.13.1 +torchvision==0.14.1 +tqdm==4.65.0 diff --git a/examples/mt-pytorch-callable/run.sh b/examples/mt-pytorch-callable/run.sh new file mode 100755 index 000000000000..d2bf34f834b1 --- /dev/null +++ b/examples/mt-pytorch-callable/run.sh @@ -0,0 +1,20 @@ +#!/bin/bash +set -e +cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"/ + +# Download the CIFAR-10 dataset +python -c "from torchvision.datasets import CIFAR10; CIFAR10('./data', download=True)" + +echo "Starting 
server" +python server.py & +sleep 3 # Sleep for 3s to give the server enough time to start + +for i in `seq 0 1`; do + echo "Starting client $i" + python client.py & +done + +# Enable CTRL+C to stop all background processes +trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM +# Wait for all background processes to complete +wait diff --git a/examples/mt-pytorch-callable/server.py b/examples/mt-pytorch-callable/server.py new file mode 100644 index 000000000000..fe691a88aba0 --- /dev/null +++ b/examples/mt-pytorch-callable/server.py @@ -0,0 +1,25 @@ +from typing import List, Tuple + +import flwr as fl +from flwr.common import Metrics + + +# Define metric aggregation function +def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: + # Multiply accuracy of each client by number of examples used + accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] + examples = [num_examples for num_examples, _ in metrics] + + # Aggregate and return custom metric (weighted average) + return {"accuracy": sum(accuracies) / sum(examples)} + + +# Define strategy +strategy = fl.server.strategy.FedAvg(evaluate_metrics_aggregation_fn=weighted_average) + +# Start Flower server +fl.server.start_server( + server_address="0.0.0.0:8080", + config=fl.server.ServerConfig(num_rounds=3), + strategy=strategy, +) diff --git a/src/py/flwr/__init__.py b/src/py/flwr/__init__.py index d3cbf00747a4..e05799280339 100644 --- a/src/py/flwr/__init__.py +++ b/src/py/flwr/__init__.py @@ -17,12 +17,13 @@ from flwr.common.version import package_version as _package_version -from . import client, common, driver, server, simulation +from . 
import client, common, driver, flower, server, simulation __all__ = [ "client", "common", "driver", + "flower", "server", "simulation", ] diff --git a/src/py/flwr/client/app.py b/src/py/flwr/client/app.py index 0013b74c631c..b39dbbfc33c0 100644 --- a/src/py/flwr/client/app.py +++ b/src/py/flwr/client/app.py @@ -22,6 +22,7 @@ from typing import Callable, ContextManager, Optional, Tuple, Union from flwr.client.client import Client +from flwr.client.flower import Bwd, Flower, Fwd from flwr.client.typing import ClientFn from flwr.common import GRPC_MAX_MESSAGE_LENGTH, EventType, event from flwr.common.address import parse_address @@ -32,13 +33,15 @@ TRANSPORT_TYPE_REST, TRANSPORT_TYPES, ) -from flwr.common.logger import log +from flwr.common.logger import log, warn_experimental_feature from flwr.proto.task_pb2 import TaskIns, TaskRes +from .flower import load_callable from .grpc_client.connection import grpc_connection from .grpc_rere_client.connection import grpc_request_response -from .message_handler.message_handler import handle, handle_control_message +from .message_handler.message_handler import handle_control_message from .numpy_client import NumPyClient +from .workload_state import WorkloadState def run_client() -> None: @@ -48,6 +51,22 @@ def run_client() -> None: args = _parse_args_client().parse_args() print(args.server) + print(args.callable_dir) + print(args.callable) + + callable_dir = args.callable_dir + if callable_dir is not None: + sys.path.insert(0, callable_dir) + + def _load() -> Flower: + flower: Flower = load_callable(args.callable) + return flower + + return start_client( + server_address=args.server, + load_callable_fn=_load, + transport="grpc-rere", # Only + ) def _parse_args_client() -> argparse.ArgumentParser: @@ -58,8 +77,18 @@ def _parse_args_client() -> argparse.ArgumentParser: parser.add_argument( "--server", - help="Server address", default="0.0.0.0:9092", + help="Server address", + ) + parser.add_argument( + "--callable", + help="For 
example: `client:flower` or `project.package.module:wrapper.flower`", + ) + parser.add_argument( + "--callable-dir", + default="", + help="Add specified directory to the PYTHONPATH and load callable from there." + " Default: current working directory.", ) return parser @@ -84,6 +113,7 @@ def _check_actionable_client( def start_client( *, server_address: str, + load_callable_fn: Optional[Callable[[], Flower]] = None, client_fn: Optional[ClientFn] = None, client: Optional[Client] = None, grpc_max_message_length: int = GRPC_MAX_MESSAGE_LENGTH, @@ -98,6 +128,8 @@ def start_client( The IPv4 or IPv6 address of the server. If the Flower server runs on the same machine on port 8080, then `server_address` would be `"[::]:8080"`. + load_callable_fn : Optional[Callable[[], Flower]] (default: None) + ... client_fn : Optional[ClientFn] A callable that instantiates a Client. (default: None) client : Optional[flwr.client.Client] @@ -146,20 +178,31 @@ class `flwr.client.Client` (default: None) """ event(EventType.START_CLIENT_ENTER) - _check_actionable_client(client, client_fn) + if load_callable_fn is None: + _check_actionable_client(client, client_fn) - if client_fn is None: - # Wrap `Client` instance in `client_fn` - def single_client_factory( - cid: str, # pylint: disable=unused-argument - ) -> Client: - if client is None: # Added this to keep mypy happy - raise Exception( - "Both `client_fn` and `client` are `None`, but one is required" - ) - return client # Always return the same instance + if client_fn is None: + # Wrap `Client` instance in `client_fn` + def single_client_factory( + cid: str, # pylint: disable=unused-argument + ) -> Client: + if client is None: # Added this to keep mypy happy + raise Exception( + "Both `client_fn` and `client` are `None`, but one is required" + ) + return client # Always return the same instance + + client_fn = single_client_factory + + def _load_app() -> Flower: + return Flower(client_fn=client_fn) - client_fn = single_client_factory + 
load_callable_fn = _load_app + else: + warn_experimental_feature("`load_callable_fn`") + + # At this point, only `load_callable_fn` should be used + # Both `client` and `client_fn` must not be used directly # Initialize connection context manager connection, address = _init_connection(transport, server_address) @@ -190,11 +233,18 @@ def single_client_factory( send(task_res) break + # Load app + app: Flower = load_callable_fn() + # Handle task message - task_res = handle(client_fn, task_ins) + fwd_msg: Fwd = Fwd( + task_ins=task_ins, + state=WorkloadState(state={}), + ) + bwd_msg: Bwd = app(fwd=fwd_msg) # Send - send(task_res) + send(bwd_msg.task_res) # Unregister node if delete_node is not None: diff --git a/src/py/flwr/client/flower.py b/src/py/flwr/client/flower.py new file mode 100644 index 000000000000..9eeb41887e24 --- /dev/null +++ b/src/py/flwr/client/flower.py @@ -0,0 +1,138 @@ +# Copyright 2023 Flower Labs GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Flower callable."""


+import importlib
+from dataclasses import dataclass
+from typing import Callable, cast

+from flwr.client.message_handler.message_handler import handle
+from flwr.client.typing import ClientFn
+from flwr.client.workload_state import WorkloadState
+from flwr.proto.task_pb2 import TaskIns, TaskRes


+@dataclass
+class Fwd:
+    """."""

+    task_ins: TaskIns
+    state: WorkloadState


+@dataclass
+class Bwd:
+    """."""

+    task_res: TaskRes
+    state: WorkloadState


+FlowerCallable = Callable[[Fwd], Bwd]


+class Flower:
+    """Flower callable.

+    Examples
+    --------
+    Assuming a typical client implementation in `FlowerClient`, you can wrap it in a
+    Flower callable as follows:

+    >>> class FlowerClient(NumPyClient):
+    >>>     # ...
+    >>>
+    >>> def client_fn(cid):
+    >>>     return FlowerClient().to_client()
+    >>>
+    >>> flower = Flower(client_fn)

+    If the above code is in a Python module called `client`, it can be started as
+    follows:

+    >>> flower-client --callable client:flower

+    In this `client:flower` example, `client` refers to the Python module in which the
+    previous code lives in. `flower` refers to the global attribute `flower` that points
+    to an object of type `Flower` (a Flower callable).
+    """

+    def __init__(
+        self,
+        client_fn: ClientFn,  # Only for backward compatibility
+    ) -> None:
+        self.client_fn = client_fn

+    def __call__(self, fwd: Fwd) -> Bwd:
+        """."""
+        # Execute the task
+        task_res = handle(
+            client_fn=self.client_fn,
+            task_ins=fwd.task_ins,
+        )
+        return Bwd(
+            task_res=task_res,
+            state=WorkloadState(state={}),
+        )


+class LoadCallableError(Exception):
+    """."""


+def load_callable(module_attribute_str: str) -> Flower:
+    """Load the `Flower` object specified in a module attribute string.

+    The module/attribute string should have the form <module>:<attribute>. 
Valid
+    examples include `client:flower` and `project.package.module:wrapper.flower`. It
+    must refer to a module on the PYTHONPATH, the module needs to have the specified
+    attribute, and the attribute must be of type `Flower`.
+    """
+    module_str, _, attributes_str = module_attribute_str.partition(":")
+    if not module_str:
+        raise LoadCallableError(
+            f"Missing module in {module_attribute_str}",
+        ) from None
+    if not attributes_str:
+        raise LoadCallableError(
+            f"Missing attribute in {module_attribute_str}",
+        ) from None

+    # Load module
+    try:
+        module = importlib.import_module(module_str)
+    except ModuleNotFoundError:
+        raise LoadCallableError(
+            f"Unable to load module {module_str}",
+        ) from None

+    # Recursively load attribute
+    attribute = module
+    try:
+        for attribute_str in attributes_str.split("."):
+            attribute = getattr(attribute, attribute_str)
+    except AttributeError:
+        raise LoadCallableError(
+            f"Unable to load attribute {attributes_str} from module {module_str}",
+        ) from None

+    # Check type
+    if not isinstance(attribute, Flower):
+        raise LoadCallableError(
+            f"Attribute {attributes_str} is not of type {Flower}",
+        ) from None

+    return cast(Flower, attribute)
diff --git a/src/py/flwr/flower/__init__.py b/src/py/flwr/flower/__init__.py
new file mode 100644
index 000000000000..090c78062d02
--- /dev/null
+++ b/src/py/flwr/flower/__init__.py
@@ -0,0 +1,26 @@
+# Copyright 2023 Flower Labs GmbH. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Flower callable package.""" + + +from flwr.client.flower import Bwd as Bwd +from flwr.client.flower import Flower as Flower +from flwr.client.flower import Fwd as Fwd + +__all__ = [ + "Flower", + "Fwd", + "Bwd", +]